Example #1
    def __call__(cls, in_filename, out_filename, pc_filename, clip_sigma):
        """
        Fit coefficients of sky templates to mini-sky FITS image from an exposure.

        :Parameters:
            - `in_filename`: filename for exposure mini-sky image
            - `out_filename`: filename for the output coefficients and residual sky data
            - `pc_filename`: filename for the stored mini-sky principal component array
            - `clip_sigma`: number of sigma at which to mark outliers, which are ignored in fitting & stats
        """

        logger.info('Fitting sky')

        mini = skyinfo.MiniDecam.load(in_filename)
        templates = skyinfo.MiniskyPC.load(pc_filename)
        try:
            # Ensure we are using the correct filter's PCA
            items_must_match(mini.header, templates.header, 'BAND')
        except Exception:
            return 1
        templates.fit(mini, clip_sigma)
        mini.save(out_filename)

        logger.debug('Finished sky fitting')
        ret_code = 0
        return ret_code
Example #2
    def __call__(cls, image):
        """Convert pixel values from ADU to electrons, including weight or variance
        image and critical keywords.

        :Parameters:
            - `image`: the DESImage for pixel values to be converted

        Applies the correction "in place"
        """

        logger.info('Gain Correcting Image')

        saturate = 0.
        gains = []
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            gain = image['GAIN' + amp]
            gains.append(gain)
            image.data[sec] *= gain

            # Adjust the weight or variance image if present:
            if image.weight is not None:
                image.weight[sec] *= 1. / (gain * gain)
            if image.variance is not None:
                image.variance[sec] *= gain * gain

            # Adjust keywords
            image['GAIN' + amp] = image['GAIN' + amp] / gain
            image['SATURAT' + amp] = image['SATURAT' + amp] * gain
            saturate = max(saturate, image['SATURAT' + amp])
            # Scale the SKYVAR if it's already here
            kw = 'SKYVAR' + amp
            if kw in image.header:
                image[kw] = image[kw] * gain * gain
            # The FLATMED will keep track of rescalings *after* gain:
            image['FLATMED' + amp] = 1.

        # The SATURATE keyword is assigned the maximum of the two amps.
        image['SATURATE'] = saturate

        # Some other keywords that we will adjust crudely with mean gain
        # if they are present:
        gain = np.mean(gains)
        for kw in ('SKYBRITE', 'SKYSIGMA'):
            if kw in image.header:
                image[kw] = image[kw] * gain

        # One other keyword to adjust:
        image['BUNIT'] = 'electrons'

        logger.debug('Finished applying Gain Correction')
        ret_code = 0
        return ret_code
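
A minimal standalone sketch (toy values, not pipeline code) of the bookkeeping above: multiplying the data by a gain g scales the variance by g**2 and the weight, its inverse, by 1/g**2, so the planes stay mutually consistent.

import numpy as np

g = 4.2                                   # hypothetical gain in e-/ADU
data_adu = np.array([100., 200., 300.])   # toy pixel values in ADU
var_adu = np.array([10., 20., 30.])       # toy variance in ADU**2
weight_adu = 1. / var_adu                 # weight is inverse variance

data_e = data_adu * g                     # pixels now in electrons
var_e = var_adu * g * g                   # variance scales as g**2
weight_e = weight_adu / (g * g)           # weight scales as 1/g**2
assert np.allclose(weight_e, 1. / var_e)  # planes remain consistent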
Example #3
    def __call__(cls, image, null_mask, resaturate):
        """Create or update the mask plane of an image

        :Parameters:
            - `image`: the DESImage to operate upon.  Mask plane is created if absent
            - `null_mask`: Integer or list of BADPIX bit names that, when set in mask image,
                         will put weight=0 for that pixel.
            - `resaturate`: if True, set data for every pixel with BADPIX_SATURATE set
                          to a value above the SATURATE keyword

        """

        if image.mask is None:
            raise NullWeightsError('Mask is missing in image')

        if null_mask != 0:
            logger.info('Nulling weight image from mask bits')

            if image.weight is None and image.variance is None:
                raise NullWeightsError('Weight is missing in image')
            weight = image.get_weight()
            kill = np.array(image.mask & null_mask, dtype=bool)
            weight[kill] = 0.
            image['HISTORY'] = time.asctime(time.localtime()) + \
                              ' Null weights with mask 0x{:04X}'.format(null_mask)
            logger.debug('Finished nulling weight image')

        if resaturate:
            logger.info('Re-saturating pixels from mask bits')
            sat = np.array(image.mask & maskbits.BADPIX_SATURATE, dtype=bool)
            try:
                saturation_level = image['SATURATE']
            except (ValueError, KeyError):
                # If there is no SATURATE, try taking max of amps
                maxsat = 0.
                try:
                    for amp in decaminfo.amps:
                        maxsat = max(maxsat, image['SATURAT' + amp])
                except (ValueError, KeyError):
                    logger.error('SATURATx header keywords not found')
                    raise NullWeightsError(
                        'SATURATx header keywords not found')
                saturation_level = maxsat
                logger.warning(
                    'Taking SATURATE as max of single-amp SATURATx values')

            image.data[sat] = 1.01 * saturation_level
            image['HISTORY'] = time.asctime(time.localtime()) + \
                              ' Set saturated pixels to {:.0f}'.format(saturation_level)
            logger.debug('Finished resaturating pixels')

        ret_code = 0
        return ret_code
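
For illustration only, a sketch of the bit logic used above; the bit values here are hypothetical stand-ins, not the real maskbits definitions.

import numpy as np

BADPIX_BPM = 1        # hypothetical bit values, for illustration only
BADPIX_SATURATE = 2
null_mask = BADPIX_BPM | BADPIX_SATURATE   # bits that null the weight

mask = np.array([[0, 1], [2, 4]], dtype=np.int32)
weight = np.ones_like(mask, dtype=float)
kill = np.array(mask & null_mask, dtype=bool)
weight[kill] = 0.     # zero weight wherever BPM or SATURATE is set
# weight is now [[1., 0.], [0., 1.]]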
Example #4
    def __call__(cls, in_filename, ref_filename, out_filename, edge=None):
        """
        Compare two compressed DES images, report size of deviations.

        :Parameters:
            - `in_filename`: the compressed DES image ("mini-sky" format) to check
            - `ref_filename`: the reference image to compare to, must have same compression as in_filename
            - `out_filename`: output image showing fractional residuals of input to reference after matching
                              normalizations.  The header of the output image will also contain keywords RMS
                              and WORST giving the rms and maximum fractional deviations, and a keyword FACTOR
                              giving the overall flux factor used to normalize them.
            - `edge`: number of compressed pixels along each CCD edge to ignore in calculating stats
        """

        logger.info('Comparing compressed image ' + in_filename +
                    ' to reference ' + ref_filename)

        indata = skyinfo.MiniDecam.load(in_filename)
        ref = skyinfo.MiniDecam.load(ref_filename)

        if indata.blocksize != ref.blocksize or indata.invalid != ref.invalid or indata.halfS7 != ref.halfS7:
            raise skyinfo.SkyError(
                "Input and reference are not matching compressions of DECam")

        resid = indata.vector() / ref.vector()
        if edge is not None and edge > 0:
            stats = resid[indata.edges(edge).vector() == 0]
        else:
            stats = np.array(resid)
        factor = np.median(stats)
        resid /= factor
        resid -= 1.
        indata.fill_from(resid)
        stats /= factor
        stats -= 1.
        rms = np.std(stats)
        worst = np.max(np.abs(stats))
        indata.header['FACTOR'] = factor
        indata.header['RMS'] = rms
        indata.header['WORST'] = worst
        if edge is not None and edge > 0:
            indata.header['EDGE'] = edge
        indata.save(out_filename)

        logger.info('Normalization factor: %f', factor)
        logger.info('RMS deviation: %f', rms)
        logger.info('Worst deviation: %f', worst)

        logger.debug('Finished image comparison')
        ret_code = 0
        return ret_code
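
A toy illustration (assumed numbers) of the statistics computed above: normalize by the median input/reference ratio, then measure the rms and worst fractional deviation.

import numpy as np

resid = np.array([1.02, 0.99, 1.01, 0.98])  # toy input/reference ratios
factor = np.median(resid)                   # overall flux factor
stats = resid / factor - 1.                 # fractional residuals
rms = np.std(stats)                         # reported as RMS
worst = np.max(np.abs(stats))               # reported as WORST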
Example #5
    def __call__(cls, in_filenames, out_filename, mask_value, invalid):
        """
        Produce a compressed image of the sky background for an entire exposure by
        combining minisky FITS images for single CCDs.  A missing input minisky
        file generates only a warning.

        :Parameters:
            - `in_filenames`: list of filenames of single-chip sky images
            - `out_filename`: filename for the output combined sky image
            - `mask_value`: value inserted into sky pixels with no data
            - `invalid`: list of detpos values for CCDs to be left out of sky image
        """

        logger.info('Combining sky')

        out = None
        # Insert each input image into the output image
        for f in in_filenames:
            try:
                small = DESDataImage.load(f)
            except (ValueError, IOError):
                # A missing minisky file is not fatal:
                logger.warning('SkyCombine could not load minisky ' + f)
                continue
            if small['DETPOS'].strip() in invalid:
                # Skip any unwanted CCDs
                continue
            blocksize = small['BLOCKSIZ']
            if out is None:
                out = skyinfo.MiniDecam(blocksize, mask_value, invalid)
                out.copy_header_info(small, cls.propagate, require=False)

            if blocksize != out.blocksize:
                raise skyinfo.SkyError('Mismatched blocksizes for SkyCombine')
            out.fill(small.data, small['DETPOS'].strip())
            # Issue warnings for mismatches of data, but skip if
            # quantities are not in headers
            for k in ('BAND', 'NITE', 'EXPNUM'):
                try:
                    v1 = out[k]
                    v2 = small[k]
                    if v1 != v2:
                        logger.warning('Mismatched {:s} in file {:s}: {} vs {}'.\
                                       format(k, f, v1, v2))
                except (KeyError, ValueError):
                    pass

        if out is None:
            raise skyinfo.SkyError('SkyCombine found no usable minisky images')
        out.save(out_filename)

        logger.debug('Finished sky combination')
        ret_code = 0
        return ret_code
Example #6
    def __call__(cls, image):
        """
        This is currently written with one instance of CTI (Charge Transfer Inefficiency) in mind
        (that occurring for CCD=41 during DES Y6).  It may be generalizable if further cases occur
        (but not all the parts have yet been written with a generalized case in mind).  When CTI is
        detected, the entire amplifier in question will be masked with BADPIX_BADAMP.
        """
        #       A simple dictionary with parameters for the only known case of CTI.
        #           Currently explist is set to encompass DES Y6 (20180815 and beyond, expnum > 765533).
        #           This could be tightened to a range, as no CTI has been detected after November 2018,
        #           but it has not yet been systematically watched for.
        CTI = {41: {'amp': 'B', 'explist': '765533-'}}

        if image['CCDNUM'] in CTI:

            #
            #           Currently can re-use the function developed for lightbulb checking
            #
            check_for_light = lb.check_lightbulb_explist(
                image['EXPNUM'], CTI[image['CCDNUM']]['explist'])
            if check_for_light:
                logger.info(
                    ' CTI: Expnum={:d}, CCDNUM={:d}, within prescribed range; checking for CTI'
                    .format(image['EXPNUM'], image['CCDNUM']))
                ctiDict = cti.check_cti(image, CTI[image['CCDNUM']], verbose=1)
                #
                #               Current criteria:
                #                   Look for horizontal striping in the image (large deficits in counts
                #                       that are not associated with an edge-bleed).
                #                   Examine the auto-correlation for lags in the x-direction at 5, 7, and 15
                #                       pixel offsets and compare to lags obtained from measurements in the
                #                       diagonal direction.
                #                   Look for evidence of excessive power in the ratio between the x-direction
                #                       and diagonal sets, indicative of charge bleeding in the x-direction.
                #
                if ctiDict['isCTI']:
                    image = cti.mask_cti(image,
                                         CTI[image['CCDNUM']],
                                         ctiDict,
                                         verbose=1)
                    logger.info(
                        ' CTI: Detected CTI for Exp={:d}, CCD={:d}, Amp={:s}'.
                        format(image['EXPNUM'], image['CCDNUM'],
                               CTI[image['CCDNUM']]['amp']))
                    image.write_key(
                        'DES_CTI', 'Masked DATASEC{:s}'.format(
                            CTI[image['CCDNUM']]['amp']))

        logger.debug('Finished checking for and masking CTI')
        ret_code = 0
        return ret_code
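
The actual test lives in cti.check_cti, whose internals are not shown here; the following is only an assumed sketch of the criterion described in the comments above, with a hypothetical helper lag_corr comparing autocorrelation at x-direction lags against the same lags taken diagonally.

import numpy as np

def lag_corr(img, dy, dx):
    """Correlation coefficient between an image and itself shifted by (dy, dx)."""
    a = img[dy:, dx:]
    b = img[:img.shape[0] - dy, :img.shape[1] - dx]
    return np.corrcoef(a.ravel(), b.ravel())[0, 1]

rng = np.random.default_rng(0)
img = rng.normal(size=(256, 256))       # stand-in for an amplifier section
for lag in (5, 7, 15):
    x_corr = lag_corr(img, 0, lag)      # lag purely along x
    d_corr = lag_corr(img, lag, lag)    # same lag along the diagonal
    # excess x-direction power relative to the diagonal would hint at
    # charge bleeding in the x-direction
    print(lag, x_corr, d_corr)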
Example #7
    def __call__(cls, image, bpm_im):
        """Apply a bad pixel mask to a DES image

        :Parameters:
            - `image`: the DESImage to which the BPM is to be applied
            - `bpm_im`: the DESImage with the bad pixel mask

        Applies the correction "in place"
        """

        logger.info('Applying BPM')
        ret_code = obpm_c(image.cstruct, bpm_im.cstruct)
        logger.debug('Finished applying BPM')
        return ret_code
Example #8
    def __call__(cls, image, skyfilename, blocksize, bitmask):
        """Produce compressed image of sky background

        :Parameters:
            - `image`: the DESImage to be compressed.
            - `skyfilename`: filename for the output compressed sky image
            - `blocksize`: side length of squares in which medians are taken
            - `bitmask`: Bitmask that will be or'ed with mask plane of image (if
                          any) to mark pixels to be ignored in calculating block
                          median.
        """

        logger.info('Compressing sky')

        # Raise exception if image is not a multiple of blocksize (or reshape fails)
        if ((image.data.shape[1] % blocksize != 0)
                or (image.data.shape[0] % blocksize != 0)):
            raise skyinfo.SkyError(
                'blocksize {:d} does not evenly divide image ({:d}x{:d})'.
                format(blocksize, image.data.shape[0], image.data.shape[1]))

        nx = int(image.data.shape[1] / blocksize)
        ny = int(image.data.shape[0] / blocksize)
        # Apply bit mask to the mask plane if any.  Superpixels
        # with no unmasked pixels will be filled with value -1
        if image.mask is None:
            sky = np.median(image.data.reshape(ny, blocksize, nx, blocksize)\
                            .swapaxes(1, 2)\
                            .reshape(ny, nx, blocksize * blocksize), axis=2)
        else:
            data = np.ma.array(image.data,
                               mask=(image.mask & bitmask),
                               fill_value=-1.)
            sky = np.ma.median(data.reshape(ny, blocksize, nx, blocksize)\
                               .swapaxes(1, 2)\
                               .reshape(ny, nx, blocksize * blocksize), axis=2)
            sky = np.ma.getdata(sky)

        # Create HDU for output image, add some header info, save output to file
        outimage = DESDataImage(sky)
        outimage['BLOCKSIZ'] = blocksize
        outimage.copy_header_info(image, cls.propagate, require=False)
        ## ?? catch exception from write error below?
        outimage.save(skyfilename)

        logger.debug('Finished sky compression')
        ret_code = 0
        return ret_code
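
A tiny self-contained demonstration (toy data) of the reshape/swapaxes trick above, which gathers each blocksize x blocksize superpixel into its own axis before taking the median:

import numpy as np

blocksize = 2
data = np.arange(16, dtype=float).reshape(4, 4)
ny = data.shape[0] // blocksize
nx = data.shape[1] // blocksize
blocks = data.reshape(ny, blocksize, nx, blocksize).swapaxes(1, 2)
sky = np.median(blocks.reshape(ny, nx, blocksize * blocksize), axis=2)
# sky is the 2x2 array of block medians: [[2.5, 4.5], [10.5, 12.5]]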
Example #9
    def __call__(cls, image, bias_im):
        """Apply a bias correction to an image

        :Parameters:
            - `image`: the DESImage to apply a bias correction
            - `bias_im`:  the bias correction image to apply

        Applies the correction "in place." Also creates BAND and NITE
        keywords if they are not present.
        """

        logger.info('Applying Bias')
        # Check that bias and data are from same CCD
        try:
            items_must_match(image, bias_im, 'CCDNUM')
        except Exception:
            return 1
        image.data -= bias_im.data
        # If both images carry weight/variance planes, add the bias variance to the image's
        if (image.weight is not None or image.variance is not None):
            if bias_im.weight is not None:
                var = image.get_variance()
                var += 1. / bias_im.weight
            elif bias_im.variance is not None:
                var = image.get_variance()
                var += bias_im.variance
        logger.debug('Finished applying Bias')
        if bias_im.sourcefile is None:
            image.write_key('BIASFIL',
                            'UNKNOWN',
                            comment='Bias correction file')
        else:
            image.write_key('BIASFIL',
                            path.basename(bias_im.sourcefile),
                            comment='Bias correction file')
        # Also create the BAND and NITE keywords if they are not present
        try:
            image['BAND']
        except (ValueError, KeyError):
            image['BAND'] = decaminfo.get_band(image['FILTER'])
        try:
            image['NITE']
        except (ValueError, KeyError):
            image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])

        ret_code = 0
        return ret_code
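
A short sketch (toy numbers) of the variance bookkeeping above: subtracting a bias that carries its own weight plane adds the bias variance, 1/weight, to the image variance.

import numpy as np

img_var = np.array([4.0, 5.0])       # toy image variance plane
bias_weight = np.array([0.5, 0.25])  # toy bias weight = 1/variance
img_var += 1. / bias_weight          # variances add under subtraction
# img_var is now [6., 9.]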
Example #10
    def __call__(cls, image, normfactor, ampborder):
        """Apply a flat field correction to an image

        :Parameters:
            - `image`: input image (image to be normalized)
            - `normval`: normalization value to use (with respect to SCALMEAN keyword)
            - `ampborder`: area around periphery of AmpRegion to use when calculating statistics

        Applies the correction to each input and writes a separate output file.
        """

        logger.info('Normalizing Flat Image')
        scalmean = image['SCALMEAN']
        nfactor = scalmean / normfactor
        nfactor2 = nfactor * nfactor
        logger.info('SCALMEAN=%.2f NORMFACTOR=%.2f NORMALIZATION=%.5f',
                    scalmean, normfactor, nfactor)
        image['NORMFACT'] = normfactor
        #
        image.data *= nfactor
        if image.weight is not None:
            image.weight *= nfactor2
        elif image.variance is not None:
            image.variance /= nfactor2


        #
        #       Create keywords that reflect the median value of the flat on each amp.
        #
        for amp in decaminfo.amps:
            datasecn = scan_fits_section(image, 'DATASEC' + amp)
            datasecn[0] += ampborder
            datasecn[1] -= ampborder
            datasecn[2] += ampborder
            datasecn[3] -= ampborder
            image['FLATMED' + amp] = np.median(
                image.data[datasecn[2]:datasecn[3] + 1,
                           datasecn[0]:datasecn[1] + 1])

        logger.debug('Finished applying normalization to Flat')
        ret_code = 0

        return ret_code
Example #11
    def __call__(cls, image, comp_im):
        """Take difference between image and another `comparison` image "

        :Parameters:
            - `image`: the DESImage from which the comparison image is subtracted
            - `comp_im`: the comparison image to be subtracted

        Applies the correction "in place"
        """

        logger.info('Taking Difference')
        image.data -= comp_im.data
        # If we have weight or variance images, add the comparison's variance to the image's
        if image.weight is not None or image.variance is not None:
            if comp_im.weight is not None:
                var = image.get_variance()
                var += 1. / comp_im.weight
            elif comp_im.variance is not None:
                var = image.get_variance()
                var += comp_im.variance
        logger.debug('Finished taking difference')
        ret_code = 0
        return ret_code
Example #12
    def __call__(cls, inlist, ccdnorm, ampborder):
        """Apply a flat field correction to an image

        :Parameters:
            - `inlist`: list of input and output flat DESImage(s) to normalize
            - `flat_im`:  the flat correction image to apply

        Applies the correction to each input and writes a separate output file.
        """

        logger.info('Initial Read of Flat Field Headers')
        #
        norm_list = []
        scalmean_list = []
        normval = None
        #
        try:
            f1 = open(inlist, 'r')
            for line in f1:
                line = line.strip()
                columns = line.split()
                if os.path.isfile(columns[0]):
                    tmp_dict = {}
                    tmp_dict['fname'] = columns[0]
                    tmp_dict['oname'] = columns[1]
                    if tmp_dict['fname'][-2:] == "fz":
                        sci_hdu = 1  # for .fz
                    else:
                        sci_hdu = 0  # for .fits (or .gz)
                    temp_fits = fitsio.FITS(tmp_dict['fname'], 'r')
                    temp_head = temp_fits[sci_hdu].read_header()
                    #
                    #                   Get the CCD number
                    #
                    try:
                        tmp_dict['ccdnum'] = int(temp_head['CCDNUM'])
                    except (ValueError, KeyError):
                        tmp_dict['ccdnum'] = -1
                        if ccdnorm >= 1:
                            print(
                                "Warning: image {:s} did not have a CCDNUM keyword!"
                                .format(tmp_dict['fname']))

                    #
                    #                   Get the SCALMEAN value
                    #
                    try:
                        tmp_dict['scalmean'] = float(temp_head['SCALMEAN'])
                    except (ValueError, KeyError):
                        raise ValueError(
                            "Image %s did not have a SCALMEAN keyword. Aborting!"
                            % tmp_dict['fname'])
                    #
                    #                   Finished first header census
                    #                   Save file info and scalmean's to a list
                    #
                    norm_list.append(tmp_dict)
                    scalmean_list.append(tmp_dict['scalmean'])
                    temp_fits.close()
            f1.close()
        except IOError:
            #
            #           Input file was not present.
            #
            raise IOError("File not found.  Missing input list %s " % inlist)
        #
        #       All information is now present. Determine the value that will be used in normalization.
        #
        if ccdnorm > 1:
            for tmp_rec in norm_list:
                if normval is None:
                    if tmp_rec['ccdnum'] == ccdnorm:
                        normval = tmp_rec['scalmean']
                else:
                    if tmp_rec['ccdnum'] == ccdnorm:
                        print(
                            "Warning: More than one image with CCDNUM={:d} identified"
                            .format(ccdnorm))
            if normval is None:
                raise ValueError(
                    "No image with CCDNUM=%d found among input list. Aborting!"
                    % ccdnorm)
            logger.info('Normalization: %.2f set based on value from CCD %d ',
                        normval, ccdnorm)
        else:
            a_scalmean = np.array(scalmean_list)
            normval = np.median(a_scalmean)
            logger.info(
                'Normalization: %.2f set based on median value of the ensemble ',
                normval)


        #
        #       Go ahead and normalize the set
        #
        logger.info('Normalizing list')
        for tmp_record in norm_list:
            logger.info('Working on image: %s ', tmp_record['fname'])
            image = DESImage.load(tmp_record['fname'])
            nfactor = tmp_record['scalmean'] / normval
            nfactor2 = nfactor * nfactor
            logger.info(' CCD: %2d, relative normalization factor: %.5f ',
                        tmp_record['ccdnum'], nfactor)
            image.data *= nfactor
            if image.weight is not None:
                image.weight *= nfactor2
            #
            #           Create keywords that reflect the median value of the flat on each amp.
            #
            for amp in decaminfo.amps:
                datasecn = scan_fits_section(image, 'DATASEC' + amp)
                datasecn[0] += ampborder
                datasecn[1] -= ampborder
                datasecn[2] += ampborder
                datasecn[3] -= ampborder
                image['FLATMED' + amp] = np.median(
                    image.data[datasecn[2]:datasecn[3] + 1,
                               datasecn[0]:datasecn[1] + 1])

            DESImage.save(image, tmp_record['oname'])

        logger.debug('Finished normalizing flats')
        ret_code = 0

        return ret_code
Example #13
    def __call__(cls, image, bpm):
        """
        Find and fix correctable columns in the image as indicated by the BPMDEF_CORR
        bit being set in the bpm image.  The algorithm is taken from John Marriner's
        fixCol() in old mask_utils.c.  The affected pixels in the column have a constant
        added to them that makes their median value equal that in neighboring columns.

        :Parameters:
            - `image`: DESImage to fix.
            - `bpm`: DESBPMImage for this CCD
        """
        #Modified 7/7/2016
        #Use clipLine to fit column slope
        #Change NEIGHBORS from 10 to 6
        #Change VAR_TOLERANCE from 0.5 to 0.25
        #Remove lower limit on correction
        #Change mask bit usage
        #Correct all correctable pixels, but use only "good" pixels to compute correction

        logger.info('Fixing columns')

        NEIGHBORS = 6  # Number of comparison columns to seek
        RANGE = 12  # Farthest away to look for comparison columns
        # Largest allowable fractional difference in variance between the fixable column
        # and its neighbors:
        VAR_TOLERANCE = 0.25
        #We require the number of sky pixels in the target column to be not less than this
        #fraction of the average number of sky pixels in the reference columns
        #If the pixel values of a column vary in a bi-stable way, the high pixels may be
        #interpreted as "objects" and the high variance may not be noticed.
        COUNT_TOL = 0.85

        if image.mask is None:
            raise FixColumnsError('Input image does not have mask')
        # Check that dome and data are from same CCD
        try:
            items_must_match(image, bpm, 'CCDNUM')
        except Exception:
            return 1

        # A "fixable" column will have CORR flag set at either start or end of column
        fixable = np.where(
            np.logical_or(bpm.mask[0, :] & cls.CORR,
                          bpm.mask[-1, :] & cls.CORR))[0]
        #Just an array that gives the ordinal number of each row in the column (for fitting the slope)
        colord = np.arange(4096)
        #Don't use slope in clipLine
        doslope = False

        for icol in fixable:
            # The fixable column is a hot bias pixel type if COL_BIAS is set
            #hotbias = np.logical_or(bpm.mask[0,icol] & maskbits.BPMDEF_BIAS_COL,
            #                             bpm.mask[-1,icol] & maskbits.BPMDEF_BIAS_COL)
            # Which pixels in the column are fixable?
            # They need to have the CORR flag set (other BPM bits specified by BPMOK are allowed)
            # Checking for valid NAN's or INF's should no longer be necessary, but is harmless
            # Also, we do not use any bad pixels.
            coldata = image.data[:, icol]
            colbpm = bpm.mask[:, icol]
            ignore = np.logical_or(colbpm & cls.BPMBAD, np.isinf(coldata))
            ignore |= np.isnan(coldata)
            corr_rows = np.logical_and(colbpm & cls.CORR, ~ignore)
            ignore = np.logical_or(ignore,
                                   image.mask[:, icol] & ~maskbits.BADPIX_BPM)
            use_rows = np.logical_and(colbpm & cls.CORR, ~ignore)

            if np.count_nonzero(use_rows) < cls.MINIMUM_PIXELS:
                logger.info(
                    "Not enough pixels to fix column {:d}".format(icol))
                continue

            # Get a robust estimate of mean level and slope in target column
            y = colord[use_rows]
            z = coldata[use_rows]
            col_mean, _, col_var, col_n = cls._clippedLine(
                y, z, doslope, cls.CLIP_SIGMA)
            if col_var <= 0.0:
                logger.info(
                    "Error in clipped line fit for column {:d}".format(icol))
                continue

            # Now want to collect stats on up to NEIGHBORS nearby columns
            norm_stats = []
            ilow = icol
            ihigh = icol
            low_limit = max(icol - RANGE, 0)
            high_limit = min(icol + RANGE, image.data.shape[1] - 1)
            while len(norm_stats) < NEIGHBORS and (ilow > low_limit
                                                   or ihigh < high_limit):
                while ilow > low_limit:
                    # get stats from next useful column to left:
                    ilow -= 1
                    if ilow in fixable:
                        continue
                    use = cls._valid_pix(image, bpm, ilow)
                    use &= use_rows
                    if np.count_nonzero(use) < cls.MINIMUM_PIXELS:
                        continue
                    y = colord[use]
                    z = image.data[:, ilow][use]
                    ref_col, ref_slope, ref_var, ref_n = cls._clippedLine(
                        y, z, doslope, cls.CLIP_SIGMA)
                    if ref_var <= 0.0:
                        continue
                    norm_stats.append([ref_col, ref_slope, ref_var, ref_n])
                    break

                while ihigh < high_limit:
                    # get stats from next useful column to right:
                    ihigh += 1
                    if ihigh in fixable:
                        continue
                    use = cls._valid_pix(image, bpm, ihigh)
                    use &= use_rows
                    if np.count_nonzero(use) < cls.MINIMUM_PIXELS:
                        continue
                    y = colord[use]
                    z = image.data[:, ihigh][use]
                    ref_col, ref_slope, ref_var, ref_n = cls._clippedLine(
                        y, z, doslope, cls.CLIP_SIGMA)
                    if ref_var <= 0.0:
                        continue
                    norm_stats.append([ref_col, ref_slope, ref_var, ref_n])
                    break

            if len(norm_stats) < NEIGHBORS:
                # Don't fix the column if we did not get comparison columns
                logger.info(
                    'Not enough comparison columns to fix col {:d}'.format(
                        icol))
                continue

            # Calculate the weighted mean estimate of mean neighbor cols
            mean = np.array([i[0] for i in norm_stats])
            #slope = np.array([i[1] for i in norm_stats])
            var = np.array([i[2] for i in norm_stats])
            wt = np.array([i[3] for i in norm_stats]) / var
            nc = np.array([i[3] for i in norm_stats])
            # Do not apply correction if the target column's variance is much
            # different from the comparison columns
            norm_var = np.sum(var * wt) / np.sum(wt)
            if np.abs(col_var - norm_var) > VAR_TOLERANCE * norm_var:
                logger.info(
                    'Too much variance to fix column {:d}'.format(icol))
                continue
            #Check that number of target column sky pixels is not much less than
            #the average of the reference columns
            norm_n = np.sum(nc) / np.size(nc)
            if col_n < COUNT_TOL * norm_n:
                logger.info(
                    'Too few sky pixels to fix column {:d}'.format(icol))
                continue

            #Valid correction.  Calculate correction & error estimate
            norm_mean = np.sum(mean * wt) / np.sum(wt)
            correction = norm_mean - col_mean
            #correction_var = 1. / np.sum(wt) + col_var / col_n
            # Apply correction:
            image.data[:, icol][corr_rows] += correction
            # Promote the corrected pixels from useless to just imperfect:
            image.mask[:, icol][corr_rows] &= ~maskbits.BADPIX_BPM
            image.mask[:, icol][corr_rows] |= maskbits.BADPIX_FIXED
            logger.info('Corrected column {:d} by {:f}'.format(
                icol, float(correction)))

        if bpm.sourcefile is None:
            image.write_key('FIXCFIL',
                            'UNKNOWN',
                            comment='BPM file for fixing columns')
        else:
            image.write_key('FIXCFIL',
                            path.basename(bpm.sourcefile),
                            comment='BPM file for fixing columns')

        logger.debug('Finished fixing columns')

        ret_code = 0
        return ret_code
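
A sketch with toy numbers of the correction step above: neighbor-column statistics [mean, slope, var, n] are combined with weights n/var, and the fixable column is shifted onto the weighted mean.

import numpy as np

norm_stats = [[100.2, 0.0, 4.0, 900],    # toy [mean, slope, var, n] per neighbor
              [99.8, 0.0, 5.0, 880]]
mean = np.array([i[0] for i in norm_stats])
var = np.array([i[2] for i in norm_stats])
wt = np.array([i[3] for i in norm_stats]) / var
norm_mean = np.sum(mean * wt) / np.sum(wt)
col_mean = 97.5                           # toy robust mean of the fixable column
correction = norm_mean - col_mean         # constant added to correctable pixels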
Example #14
def medclip(data, clipsig=3.0, maxiter=10, converge_num=0.001, verbose=0):
    """ Function to examine data and determine average, median, stddev
    using a clipping algorithm
    Inputs: data: image array
            clipsig:  The number of N-sigma to be excluded when clipping
            maxiter:  Maximum number of iterations to perform
            converge_num:  Convergence Criteria

    Outputs: avgval: average from clipped distribution
             medval: median from clipped distribution
             stdval: stddev from clipped distribution
    """

    ct = data.size
    iter = 0
    c1 = 1.0
    c2 = 0.0

    avgval = np.mean(data)
    medval = np.median(data)
    sig = np.std(data)
    wsm = np.where(abs(data - medval) < clipsig * sig)
    if 0 < verbose < 4:
        logger.debug("iter,avgval,medval,sig")
    if 2 < verbose < 4:
        logger.debug("{:d} {:.2f} {:.2f} {:.2f} ".format(
            0, avgval, medval, sig))
    if verbose > 3:
        logger.debug("iter,avgval,medval,sig,ct,c1,c2")
        logger.debug("{:d} {:.2f} {:.2f} {:.2f} {:d} {:.1f} {:.1f} ".format(
            0, avgval, medval, sig, ct, c1, c2))

    while c1 >= c2 and iter < maxiter:
        iter += 1
        lastct = ct
        avgval = np.mean(data[wsm])
        medval = np.median(data[wsm])
        sig = np.std(data[wsm])
        wsm = np.where(abs(data - medval) < clipsig * sig)
        ct = len(wsm[0])
        if ct > 0:
            c1 = abs(ct - lastct)
            c2 = converge_num * lastct
        if 2 < verbose < 4:
            logger.debug("{:d} {:.2f} {:.2f} {:.2f} ".format(
                iter, avgval, medval, sig))
        if verbose > 3:
            logger.debug("{:d} {:.2f} {:.2f} {:.2f} {:d} {:.1f} {:.1f} ".format(
                iter, avgval, medval, sig, ct, c1, c2))
#   End of while loop
    if iter >= maxiter:
        logger.info(
            "Warning: medclip had not yet converged after {:d} iterations".
            format(iter))

    medval = np.median(data[wsm])
    avgval = np.mean(data[wsm])
    stdval = np.std(data[wsm])
    if verbose > 0:
        logger.info("{:d} {:.2f} {:.2f} {:.2f} ".format(
            iter + 1, avgval, medval, stdval))

    return avgval, medval, stdval
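
An example call to medclip on synthetic data (assumed values): the injected outliers inflate the naive statistics but are removed by the clipping.

import numpy as np

rng = np.random.default_rng(1)
data = rng.normal(loc=100., scale=5., size=10000)
data[:50] = 10000.   # inject gross outliers
avg, med, std = medclip(data, clipsig=3.0)
# avg and med land near 100 and std near 5 once the outliers are clipped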
Example #15
    def __call__(cls,
                 streak_list,
                 image_list,
                 streak_name_in,
                 streak_name_out,
                 image_name_in,
                 image_name_out,
                 add_width,
                 max_extrapolate,
                 plotfile=None):
        """
        Read input list of streak detections and predict where a streak
        crossed a CCD but was missed.  Then create new copies of images,
        altering masks to set STREAK bit in new streaks.

        :Parameters:
            - `streak_list`: list of input streak file names
            - `image_list`: list of names of image files to be updated
            - `streak_name_in`: string to replace in input streak filenames
            - `streak_name_out`: replacement string for output streak filenames
            - `image_name_in`: string to replace in input image filenames
            - `image_name_out`: replacement string for output image filenames
            - `add_width`:  number of pixels to grow (or shrink) streak width
            - `max_extrapolate`: farthest to start a new streak from endpoint of an existing one (degrees)
            - `plotfile`: if given, a diagram of streaks is drawn into this file
        """

        logger.info('Reading {:d} streak files'.format(len(streak_list)))

        # Read in all the streak RA/Dec, into a dictionary keyed by CCDNUM,
        # which should be in the primary header.  Also save a dictionary of
        # the file names for these
        streak_corners = {}
        streak_names = {}
        for streakfile in streak_list:
            logger.info(f"Reading streak file {streakfile}")
            with fitsio.FITS(streakfile, 'r') as fits:
                ccdnum = fits[0].read_header()['CCDNUM']
                streak_names[ccdnum] = streakfile
                tab = fits[1].read()
                if len(tab) > 0:
                    streak_corners[ccdnum] = tab['CORNERS_WCS']

        logger.info('Reading WCS from {:d} CCDs'.format(len(image_list)))

        # Read in the WCS for each CCD for which we have an image,
        # also put into dicts keyed by CCDNUM
        # Will get these directly from FITS instead of using DESImage in order
        # to save reading all of the data.
        wcs = {}
        crval1 = []
        crval2 = []
        for imgfile in image_list:
            try:
                hdr = fitsio.read_header(imgfile, 0)
                ccd = hdr['CCDNUM']
                crval1.append(hdr['CRVAL1'])
                crval2.append(hdr['CRVAL2'])
                # Due to a bug in fitsio 1.0.0rc1+0, we need to clean up the
                # header before feeding it to wcsutil and remove the 'None' and other problematic items
                for k in hdr:
                    # Try to access the item, if failed we have to remove it
                    if not k:
                        hdr.delete(k)
                        continue
                    try:
                        _ = hdr[k]
                    except Exception:
                        logger.info(
                            "Removing keyword: {:s} from header".format(k))
                        hdr.delete(k)
                wcs[ccd] = wcsutil.WCS(hdr)
            except Exception as e:
                logger.error('Failure reading WCS from {:s}: {}'.format(imgfile, e))
                return 1

        # Determine a center for local gnomonic projection
        ra0 = np.median(crval1)
        dec0 = np.median(crval2)

        # Calculate upper and lower bounds of each CCD in the local
        # gnomonic system.
        ccd_x1 = np.zeros(63, dtype=float)
        ccd_x2 = np.zeros(63, dtype=float)
        ccd_y1 = np.zeros(63, dtype=float)
        ccd_y2 = np.zeros(63, dtype=float)

        ccd_xmin = 1.
        ccd_xmax = 2048.
        ccd_ymin = 1.
        ccd_ymax = 4096.
        ccd_corners_xpix = np.array([ccd_xmin, ccd_xmin, ccd_xmax, ccd_xmax])
        ccd_corners_ypix = np.array([ccd_ymin, ccd_ymax, ccd_ymax, ccd_ymin])
        for ccd, w in wcs.items():
            ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix)
            x_corners, y_corners = gnomonic(ra, dec, ra0, dec0)
            ccd_x1[ccd] = np.min(x_corners)
            ccd_y1[ccd] = np.min(y_corners)
            ccd_x2[ccd] = np.max(x_corners)
            ccd_y2[ccd] = np.max(y_corners)

        # Now collect information on all of the streak segments that we have
        ccdnum = []
        ra_corner = []
        dec_corner = []

        for ccd, streaks in streak_corners.items():
            if ccd not in wcs:
                # Skip segments on CCDs that have no WCS
                logger.warning(
                    'No WCS found for streaks on CCD {:d}'.format(ccd))
                continue
            n1, _, _ = streaks.shape
            for i in range(n1):
                ccdnum.append(ccd)
                ra_corner.append(streaks[i, :, 0])
                dec_corner.append(streaks[i, :, 1])
        # Put streak corners into gnomonic system for this exposure
        x1, y1 = gnomonic(np.array([r[0] for r in ra_corner], dtype=float),
                          np.array([d[0] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x2, y2 = gnomonic(np.array([r[1] for r in ra_corner], dtype=float),
                          np.array([d[1] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x3, y3 = gnomonic(np.array([r[2] for r in ra_corner], dtype=float),
                          np.array([d[2] for d in dec_corner], dtype=float),
                          ra0, dec0)
        x4, y4 = gnomonic(np.array([r[3] for r in ra_corner], dtype=float),
                          np.array([d[3] for d in dec_corner], dtype=float),
                          ra0, dec0)
        ccdnum = np.array(ccdnum, dtype=int)

        # Describe each segment by two endpoints at the midpoints of the short sides
        # Will need to decide which is the short side
        d12 = np.hypot(x2 - x1, y2 - y1)
        d23 = np.hypot(x3 - x2, y3 - y2)
        xleft = np.where(d12 < d23, 0.5 * (x1 + x2), 0.5 * (x2 + x3))
        yleft = np.where(d12 < d23, 0.5 * (y1 + y2), 0.5 * (y2 + y3))
        xright = np.where(d12 < d23, 0.5 * (x3 + x4), 0.5 * (x4 + x1))
        yright = np.where(d12 < d23, 0.5 * (y3 + y4), 0.5 * (y4 + y1))
        dx = xright - xleft
        dy = yright - yleft
        # Calculate a width as 2x the
        # largest perp distance from a vertex to this line
        w1 = np.abs(dx * (y1 - yleft) - dy * (x1 - xleft)) / np.hypot(dx, dy)
        w2 = np.abs(dx * (y2 - yleft) - dy * (x2 - xleft)) / np.hypot(dx, dy)
        w3 = np.abs(dx * (y3 - yleft) - dy * (x3 - xleft)) / np.hypot(dx, dy)
        w4 = np.abs(dx * (y4 - yleft) - dy * (x4 - xleft)) / np.hypot(dx, dy)
        wmax = np.maximum(w1, w2)
        wmax = np.maximum(wmax, w3)
        wmax = np.maximum(wmax, w4)
        wmax = 2 * wmax

        # Rearrange so that xleft <= xright
        swapit = xright < xleft
        tmp = np.where(swapit, xleft, xright)
        xleft = np.where(swapit, xright, xleft)
        xright = np.array(tmp)
        tmp = np.where(swapit, yleft, yright)
        yleft = np.where(swapit, yright, yleft)
        yright = np.array(tmp)

        # Get the crossing points of the lines into CCDs
        xc1, xc2, yc1, yc2 = boxCross(xleft, yleft, dx, dy, ccd_x1[ccdnum],
                                      ccd_x2[ccdnum], ccd_y1[ccdnum],
                                      ccd_y2[ccdnum])

        # Get rid of segments that appear to miss their host CCDs
        miss = xc2 < xc1

        # Take 1st crossing point instead of left point if it has higher x, or vertical
        # with higher y, i.e. truncate the track segment at the edge of the CCD.
        replace = np.where(dx == 0, yc1 > yleft, xc1 > xleft)
        xc1 = np.where(replace, xc1, xleft)
        yc1 = np.where(replace, yc1, yleft)
        # Likewise truncate segment at right-hand crossing
        replace = np.where(dx == 0, yc2 < yright, xc2 < xright)
        xc2 = np.where(replace, xc2, xright)
        yc2 = np.where(replace, yc2, yright)

        # Backfill the non-intersections again - note that above
        # maneuvers will leave xc2<xc1 for streaks that miss their CCDs,
        # unless vertical ???
        xc1[miss] = 0.
        xc2[miss] = -1.

        # Get a final verdict on hit or miss
        miss = np.where(dx == 0, yc2 < yc1, xc2 < xc1)

        # Save information on all valid streaks
        xc1 = xc1[~miss]
        xc2 = xc2[~miss]
        yc1 = yc1[~miss]
        yc2 = yc2[~miss]
        wmax = wmax[~miss]
        ccdnum = ccdnum[~miss]

        # Express segments as slopes and midpoints
        dx = xc2 - xc1
        dy = yc2 - yc1
        mx = dx / np.hypot(dx, dy)
        my = dy / np.hypot(dx, dy)

        # Mark segments that are probably spurious edge detections
        EDGE_SLOPE = 2.  # Degrees from horizontal for edge streaks
        EDGE_DISTANCE = 0.005  # Max degrees from streak center to CCD edge for spurious streaks
        horizontal = np.abs(my) < np.sin(EDGE_SLOPE * np.pi / 180.)
        ymid = 0.5 * (yc1 + yc2)
        nearedge = np.logical_or(ccd_y2[ccdnum] - ymid < EDGE_DISTANCE,
                                 ymid - ccd_y1[ccdnum] < EDGE_DISTANCE)
        nearedge = np.logical_and(nearedge, horizontal)

        # Check short edges too
        vertical = np.abs(mx) < np.sin(EDGE_SLOPE * np.pi / 180.)
        xmid = 0.5 * (xc1 + xc2)
        tmp = np.logical_or(ccd_x2[ccdnum] - xmid < EDGE_DISTANCE,
                            xmid - ccd_x1[ccdnum] < EDGE_DISTANCE)
        nearedge = np.logical_or(nearedge, np.logical_and(tmp, vertical))

        # Decide which segments are "friends" of each other.
        # To be a friend, the center of each must be close
        # to the extension of the line of the other.
        # Accumulate a list of tracks, each track is a list of
        # individual streaks that are friends of friends
        tracks = []

        for i in range(len(xc1)):
            if nearedge[i]:
                continue  # Do not use edge tracks
            itstrack = [i]  # start new track with just this
            for t in tracks:
                # Search other tracks for friends
                for j in t:
                    if friends(xc1, xc2, yc1, yc2, mx, my, ccdnum, i, j):
                        itstrack += t  # Merge track
                        tracks.remove(t)  # Get rid of old one
                        break  # No need to check others
            tracks.append(itstrack)

        # Now iterate through tracks, seeing if they have missing segments
        # Create arrays to hold information on new tracks
        new_ccdnum = []
        new_xc1 = []
        new_xc2 = []
        new_yc1 = []
        new_yc2 = []
        new_ra1 = []
        new_ra2 = []
        new_dec1 = []
        new_dec2 = []
        new_width = []
        new_extrapolated = []
        new_nearest = []

        for t in tracks:
            if len(t) < 2:
                continue  # Do not extrapolate singlet tracks
            # Make an array of indices of segments in this track
            ids = np.array(t)
            # Fit a quadratic path to the streak endpoints
            xx = np.concatenate((xc1[ids], xc2[ids]))
            yy = np.concatenate((yc1[ids], yc2[ids]))

            # If the track slope is mostly along x, then we'll have the independent
            # variable xx be x and dependent yy will be y.  But if track
            # is more vertical, then we'll look at functions x(y) instead.
            xOrder = np.median(np.abs(mx[ids])) > np.median(np.abs(my[ids]))
            if not xOrder:
                xx, yy = yy, xx

            # Record limits of detected tracks' independent variable
            xxmin = np.min(xx)
            xxmax = np.max(xx)

            # Fit a quadratic to the points, or
            # linear if only one streak
            # Allow up to nclip points to clip
            RESID_TOLERANCE = 6. / 3600.  # Clip >6" deviants
            nclip = 2
            for i in range(nclip + 1):
                if len(xx) > 2:
                    A = np.vstack((np.ones_like(xx), xx, xx * xx))
                else:
                    A = np.vstack((np.ones_like(xx), xx))
                coeffs = np.linalg.lstsq(A.T, yy, rcond=None)[0]
                resid = yy - np.dot(A.T, coeffs)
                j = np.argmax(np.abs(resid))
                if i == nclip or np.abs(resid[j]) < RESID_TOLERANCE:
                    break
                xx = np.delete(xx, j)
                yy = np.delete(yy, j)

            # Calculate the y(x1),y(x2) where tracks
            # cross the left/right of every CCD, then
            # find the ones that will cross CCD's y.

            # These are CCD bounds, with xx being the quadratic's argument
            if xOrder:
                xx1 = ccd_x1
                xx2 = ccd_x2
                yy1 = ccd_y1
                yy2 = ccd_y2
            else:
                xx1 = ccd_y1
                xx2 = ccd_y2
                yy1 = ccd_x1
                yy2 = ccd_x2

            if len(coeffs) == 2:
                A2 = np.vstack((np.ones_like(xx2), xx2)).T
                A1 = np.vstack((np.ones_like(xx1), xx1)).T
            else:
                A2 = np.vstack((np.ones_like(xx2), xx2, xx2 * xx2)).T
                A1 = np.vstack((np.ones_like(xx1), xx1, xx1 * xx1)).T

            # yyc[12] are the dependent coordinate at crossings of xx[12] bounds
            yyc1 = np.dot(A1, coeffs)
            yyc2 = np.dot(A2, coeffs)
            # Now we ask whether the y value of streak at either edge crossing
            # is in the y range of a CCD
            missed = np.logical_or(
                np.maximum(yyc1, yyc2) < yy1,
                np.minimum(yyc1, yyc2) > yy2)
            # Also skip any CCD where we already have a streak
            for iccd in ccdnum[ids]:
                missed[iccd] = True
            missed[0] = True  # There is no CCD0
            missed[61] = True  # Never use this one either, it's always dead

            # Now find intersection of new streaks with edges of their CCDs
            # Define a function for the streak path that we'll use for solving
            def poly(x, coeffs, ysolve):
                y = coeffs[0] + x * coeffs[1]
                if len(coeffs) > 2:
                    y += coeffs[2] * x * x
                return y - ysolve

            EDGE_TOLERANCE = 0.2 / 3600.  # Find x/y of edge to this accuracy (0.2 arcsec)
            for iccd in np.where(~missed)[0]:
                # This is a loop over every CCD that the track crosses but has no detected segment
                # Determine an (xx,yy) pair for its entry and exit from the CCD
                new_yy1 = yyc1[iccd]
                new_yy2 = yyc2[iccd]
                new_xx1 = xx1[iccd]
                new_xx2 = xx2[iccd]
                # left side:
                if new_yy1 < yy1[iccd]:
                    new_xx1 = newton(poly,
                                     new_xx1,
                                     args=(coeffs, yy1[iccd]),
                                     tol=EDGE_TOLERANCE)
                elif new_yy1 > yy2[iccd]:
                    new_xx1 = newton(poly,
                                     new_xx1,
                                     args=(coeffs, yy2[iccd]),
                                     tol=EDGE_TOLERANCE)
                new_yy1 = poly(new_xx1, coeffs, 0.)
                # right side
                if new_yy2 < yy1[iccd]:
                    new_xx2 = newton(poly,
                                     new_xx2,
                                     args=(coeffs, yy1[iccd]),
                                     tol=EDGE_TOLERANCE)
                elif new_yy2 > yy2[iccd]:
                    new_xx2 = newton(poly,
                                     new_xx2,
                                     args=(coeffs, yy2[iccd]),
                                     tol=EDGE_TOLERANCE)
                new_yy2 = poly(new_xx2, coeffs, 0.)
                # Does the solution lie outside the input streaks?
                extrapolated = new_xx1 < xxmin or new_xx2 > xxmax
                width = np.median(wmax[ids])

                # Calculate distance to nearest unclipped streak member
                nearest = min(np.min(np.hypot(xx - new_xx1, yy - new_yy1)),
                              np.min(np.hypot(xx - new_xx2, yy - new_yy2)))

                if not xOrder:
                    # swap xx,yy back if we had y as the independent variable
                    new_xx1, new_yy1 = new_yy1, new_xx1
                    new_xx2, new_yy2 = new_yy2, new_xx2

                # Project the coordinates back to RA, Dec
                ra1, dec1 = gnomonicInverse(new_xx1, new_yy1, ra0, dec0)
                ra2, dec2 = gnomonicInverse(new_xx2, new_yy2, ra0, dec0)

                # Append this streak to list of new ones
                new_ccdnum.append(iccd)
                new_xc1.append(new_xx1)
                new_xc2.append(new_xx2)
                new_yc1.append(new_yy1)
                new_yc2.append(new_yy2)
                new_ra1.append(ra1)
                new_ra2.append(ra2)
                new_dec1.append(dec1)
                new_dec2.append(dec2)
                new_width.append(width)
                new_extrapolated.append(extrapolated)
                new_nearest.append(nearest)

        # Make all lists into arrays
        new_ccdnum = np.array(new_ccdnum, dtype=int)
        new_xc1 = np.array(new_xc1, dtype=float)
        new_xc2 = np.array(new_xc2, dtype=float)
        new_yc1 = np.array(new_yc1, dtype=float)
        new_yc2 = np.array(new_yc2, dtype=float)
        new_ra1 = np.array(new_ra1, dtype=float)
        new_ra2 = np.array(new_ra2, dtype=float)
        new_dec1 = np.array(new_dec1, dtype=float)
        new_dec2 = np.array(new_dec2, dtype=float)
        new_width = np.array(new_width, dtype=float)
        new_extrapolated = np.array(new_extrapolated, dtype=bool)
        new_nearest = np.array(new_nearest, dtype=float)

        # Decide which new segments will be masked
        maskit = np.logical_or(~new_extrapolated,
                               new_nearest <= max_extrapolate)

        logger.info('Identified {:d} missing streak segments for masking'.format(\
                    np.count_nonzero(maskit)))

        # Make the diagnostic plot if desired
        if plotfile is not None:
            pl.figure(figsize=(6, 6))
            pl.xlim(-1.1, 1.1)
            pl.ylim(-1.1, 1.1)
            pl.gca().set_aspect('equal')

            # Draw CCD outlines and numbers
            for ccd, w in wcs.items():
                ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix)
                x_corners, y_corners = gnomonic(ra, dec, ra0, dec0)
                x = x_corners.tolist()
                y = y_corners.tolist()
                x.append(x[0])
                y.append(y[0])
                pl.plot(x, y, 'k-', label=None)
                x = np.mean(x_corners)
                y = np.mean(y_corners)
                pl.text(x,
                        y,
                        str(ccd),
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=14)

            # Draw input streaks marked as edge
            labelled = False
            for i in np.where(nearedge)[0]:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'm-', lw=2, label='edge')
                    labelled = True
                else:
                    pl.plot(x, y, 'm-', lw=2, label=None)

            # Draw linked tracks
            s = set()
            for t in tracks:
                if len(t) > 1:
                    s = s.union(set(t))
            labelled = False
            for i in s:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'b-', lw=2, label='connected')
                    labelled = True
                else:
                    pl.plot(x, y, 'b-', lw=2, label=None)

            # Draw singleton tracks as those that are neither edge nor connected
            s = s.union(set(np.where(nearedge)[0]))
            single = set(range(len(xc1)))
            single = single.difference(s)
            labelled = False
            for i in single:
                x = (xc1[i], xc2[i])
                y = (yc1[i], yc2[i])
                if not labelled:
                    pl.plot(x, y, 'c-', lw=2, label='unconnected')
                    labelled = True
                else:
                    pl.plot(x, y, 'c-', lw=2, label=None)

            # Draw missed tracks that will be masked
            labelled = False
            for i in np.where(maskit)[0]:
                x = (new_xc1[i], new_xc2[i])
                y = (new_yc1[i], new_yc2[i])
                if not labelled:
                    pl.plot(x, y, 'r-', lw=2, label='new masked')
                    labelled = True
                else:
                    pl.plot(x, y, 'r-', lw=2, label=None)

            # Draw missed tracks that will not be masked
            labelled = False
            for i in np.where(~maskit)[0]:
                x = (new_xc1[i], new_xc2[i])
                y = (new_yc1[i], new_yc2[i])
                if not labelled:
                    pl.plot(x, y, 'r:', lw=2, label='new skipped')
                    labelled = True
                else:
                    pl.plot(x, y, 'r:', lw=2, label=None)

            # legend
            pl.legend(framealpha=0.3, fontsize='small')
            pl.savefig(plotfile)

        # Now accumulate pixel coordinates of corners of all new streaks to mask
        added_streak_ccds = []
        added_streak_corners = []

        for id, ccd in enumerate(new_ccdnum):
            if not maskit[id]:
                continue  # Only proceed with the ones to be masked
            # Get a pixel scale from the WCS, in arcsec/pix
            xmid = np.mean(ccd_corners_xpix)
            ymid = np.mean(ccd_corners_ypix)
            ra, dec = wcs[ccd].image2sky(xmid, ymid)
            ra2, dec2 = wcs[ccd].image2sky(xmid + 1, ymid)
            pixscale = np.hypot(
                np.cos(dec * np.pi / 180.) * (ra - ra2), dec - dec2)

            # width of streak, in pixels
            w = new_width[id] / pixscale + add_width
            if w <= 0.:
                continue  # Don't mask streaks of zero width
            # Make RA/Dec of track endpoints
            x = np.array([new_xc1[id], new_xc2[id]])
            y = np.array([new_yc1[id], new_yc2[id]])
            ra, dec = gnomonicInverse(x, y, ra0, dec0)
            # Convert to pixel coordinates
            x, y = wcs[ccd].sky2image(ra, dec)
            line = Line(x[0], y[0], x[1], y[1])
            # Create bounding rectangle of track
            corners_pix = boxTrack(line, w, ccd_xmin, ccd_xmax, ccd_ymin,
                                   ccd_ymax)
            added_streak_ccds.append(ccd)
            added_streak_corners.append(np.array(corners_pix))

        added_streak_ccds = np.array(added_streak_ccds)

        # Make new copies of streak files, adding new ones
        logger.debug('Rewriting streak files')

        for ccd, streakfile_in in streak_names.items():
            nmatch = len(re.findall(streak_name_in, streakfile_in))
            if nmatch != 1:
                logger.error('Could not update streak file named <' +
                             streakfile_in + '>')
                return 1
            streakfile_out = re.sub(streak_name_in, streak_name_out,
                                    streakfile_in)
            # Use file system to make fresh copy of table's FITS file
            shutil.copy2(streakfile_in, streakfile_out)

            # Find new streaks for this ccd
            add_ids = np.where(added_streak_ccds == ccd)[0]
            if len(add_ids) > 0:
                # Open the table and add new streaks' info
                try:
                    fits = fitsio.FITS(streakfile_out, 'rw')
                    addit = np.recarray(len(add_ids),
                                        dtype=[('LABEL', '>i4'),
                                               ('CORNERS', '>f4', (4, 2)),
                                               ('CORNERS_WCS', '>f8', (4, 2))])
                    labels = fits[1]['LABEL'][:]
                    if len(labels) > 0:
                        first_label = np.max(labels) + 1
                    else:
                        first_label = 1
                    addit.LABEL = np.arange(first_label,
                                            first_label + len(addit))

                    for i, id in enumerate(add_ids):
                        corners_pix = added_streak_corners[id]
                        addit.CORNERS[i] = corners_pix
                        ra, dec = wcs[ccd].image2sky(corners_pix[:, 0],
                                                     corners_pix[:, 1])
                        addit.CORNERS_WCS[i] = np.vstack((ra, dec)).T

                    fits[1].append(addit)
                    fits.close()
                except Exception as e:
                    logger.error('Failure updating streak file <{:s}>: {:s}'.format(
                        streakfile_out, str(e)))
                    return 1

        logger.debug('Remasking images')

        for imgfile_in in image_list:
            # Make the name needed for output
            nmatch = len(re.findall(image_name_in, imgfile_in))
            if nmatch != 1:
                logger.error(
                    'Could not create output name for image file named <' +
                    imgfile_in + '>')
                return 1
            imgfile_out = re.sub(image_name_in, image_name_out, imgfile_in)

            logger.info(f"Loading image: {imgfile_in}")
            sci = DESImage.load(imgfile_in)
            ccd = sci.header['CCDNUM']

            # Find added streaks for this ccd
            add_ids = np.where(added_streak_ccds == ccd)[0]
            if len(add_ids) > 0:
                shape = sci.mask.shape
                yy, xx = np.indices(shape)
                points = np.vstack((xx.flatten(), yy.flatten())).T
                inside = None

                for id in add_ids:
                    # From Alex's immask routine: mark interior pixels
                    # for each added streak
                    v = added_streak_corners[id]
                    vertices = [(v[0, 0], v[0, 1]), (v[1, 0], v[1, 1]),
                                (v[2, 0], v[2, 1]), (v[3, 0], v[3, 1]),
                                (v[0, 0], v[0, 1])]
                    path = matplotlib.path.Path(vertices)

                    if inside is None:
                        inside = path.contains_points(points)
                    else:
                        inside = np.logical_or(inside,
                                               path.contains_points(points))

                # Make the list of masked pixels
                if inside is None:
                    ymask = np.array([], dtype=int)
                    xmask = np.array([], dtype=int)
                else:
                    ymask, xmask = np.nonzero(inside.reshape(shape))

                sci.mask[ymask, xmask] |= parse_badpix_mask('STREAK')

            # Write something into the image header

            sci['DESCNCTS'] = time.asctime(time.localtime()) + \
                            ' Mask {:d} new streaks'.format(len(add_ids))
            #            sci['HISTORY'] = time.asctime(time.localtime()) + \
            #                             ' Mask {:d} new streaks'.format(len(add_ids))
            logger.info(f"Saving to: {imgfile_out}")
            sci.save(imgfile_out)

        logger.info('Finished connecting streaks')
        ret_code = 0
        return ret_code
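The routine above leans on gnomonic() and gnomonicInverse() to move between sky
coordinates and the tangent plane centered on (ra0, dec0).  Their implementations
are not shown in this collection; the sketch below uses the standard tangent-plane
(TAN) formulas with all angles in degrees, and may differ from the pipeline's
actual helpers in units or sign conventions.

import numpy as np

def gnomonic_sketch(ra, dec, ra0, dec0):
    # Project (ra, dec) onto the plane tangent at (ra0, dec0).
    # Standard TAN formulas; a hypothetical stand-in, not the pipeline's code.
    ra, dec = np.radians(ra), np.radians(dec)
    ra0, dec0 = np.radians(ra0), np.radians(dec0)
    cosc = np.sin(dec0) * np.sin(dec) + \
        np.cos(dec0) * np.cos(dec) * np.cos(ra - ra0)
    x = np.cos(dec) * np.sin(ra - ra0) / cosc
    y = (np.cos(dec0) * np.sin(dec) -
         np.sin(dec0) * np.cos(dec) * np.cos(ra - ra0)) / cosc
    return np.degrees(x), np.degrees(y)

def gnomonic_inverse_sketch(x, y, ra0, dec0):
    # Invert the projection above; returns (ra, dec) in degrees.
    x, y = np.radians(x), np.radians(y)
    ra0, dec0 = np.radians(ra0), np.radians(dec0)
    denom = np.sqrt(1. + x * x + y * y)
    dec = np.arcsin((np.sin(dec0) + y * np.cos(dec0)) / denom)
    ra = ra0 + np.arctan2(x, np.cos(dec0) - y * np.sin(dec0))
    return np.degrees(ra), np.degrees(dec)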
Example #16
    def __call__(cls, in_filename, out_filename, ccdnum, img_template,
                 reject_rms, mem_use):
        """
        Create full-resolution sky templates based on previous PCA.
        Does this pixel by pixel, via robust fitting of the data in the input
        full-res images to the PCA coefficients.

        :Parameters:
            - `in_filename`: the file holding the PCA outputs on compressed sky
            - `out_filename`: filename for the output template
            - `ccdnum`: which CCD to produce templates for
            - `img_template`: string that can be formatted with the expnum to yield
                              filename of the DESImage holding the full-res data.
            - `reject_rms`: Exclude exposures with fractional RMS residual sky above this.
                            If this is None, just uses the exposures that PCA used.
            - `mem_use:` Number of GB to target for memory usage
        """

        logger.info('Starting sky template construction')

        # Acquire PCA information, including the table of info on input exposures
        pc = skyinfo.MiniskyPC.load(in_filename)
        # ??? Should make this table part of the MiniskyPC class:
        pctab = fitsio.read(in_filename, ext='EXPOSURES')

        # Build a MiniDECam that has our choice of CCDs that we can use for indexing.
        mini = pc.get_pc(0)
        # Quit if we are requesting template for a CCD that was not compressed
        detpos = decaminfo.detpos_dict[ccdnum]
        try:
            mini.index_of(detpos, 1, 1)
        except skyinfo.SkyError:
            logger.error('Template requested for CCDNUM not included in PCA')
            return 1

        # Select exposures we'll use
        if reject_rms is None:
            # If no RMS threshold is specified, use the same exposures
            # that were kept during PCA of compressed skies
            use = np.array(pctab['USE'])
        else:
            # Choose our own threshold
            use = pctab['RMS'] < reject_rms
        nimg = np.count_nonzero(use)

        expnums = []
        vv = []
        for i, val in enumerate(use):
            if val:
                vv.append(pctab['COEFFS'][i])
                expnums.append(pctab['EXPNUM'][i])
        V = np.vstack(vv)
        del vv

        # We'll re-normalize each exposure, and its coefficients, by V[0]
        norms = np.array(V[:, 0])
        V = V.T / norms  # V is now of shape (npc,nimg)

        npc = pc.U.shape[1]
        ySize = decaminfo.shape[0]
        xSize = decaminfo.shape[1]

        # Create the output array
        out = np.zeros((npc, ySize, xSize), dtype=np.float32)

        # Only fill half of it for the bad amp:
        if ccdnum == decaminfo.ccdnums['S7'] and pc.halfS7:
            xSize //= 2

        # Decide how many rows of blocks we'll read from files at a time
        bytes_per_row = 4 * xSize * pc.blocksize * nimg
        #xBlocks = xSize / pc.blocksize
        yBlocks = min(int(np.floor(mem_use * (2**30) / bytes_per_row)),
                      ySize // pc.blocksize)

        if yBlocks < 1:
            logger.warning(
                'Proceeding even though mem_use is not enough to store 1 row of blocks'
            )
            yBlocks = 1

        d = {'ccd': ccdnum}

        # Collect input data in chunks of yBlocks rows of blocks, then process one block at a time.
        for yStart in range(0, ySize, yBlocks * pc.blocksize):
            # Acquire the pixel data into a 3d array
            yStop = min(ySize, yStart + yBlocks * pc.blocksize)
            logger.info('Working on rows {:d} -- {:d}'.format(yStart, yStop))
            data = np.zeros((nimg, yStop - yStart, xSize), dtype=np.float32)

            for i, expnum in enumerate(expnums):
                d['expnum'] = expnum
                filename = img_template.format(**d)
                logger.debug('Getting pixels from ' + filename)
                with fitsio.FITS(filename) as fits:
                    data[i, :, :] = fits['SCI'][yStart:yStop, :xSize]
            data /= norms[:, np.newaxis,
                          np.newaxis]  # Apply norms to be near zero

            # Now cycle through all blocks
            for jb in range((yStop - yStart) // pc.blocksize):
                for ib in range(xSize // pc.blocksize):
                    logger.debug('Fitting for block ({:d},{:d})'.format(
                        jb + yStart // pc.blocksize, ib))
                    # Use PCA of this block as starting guess at solution
                    index = mini.index_of(detpos, yStart // pc.blocksize + jb,
                                          ib)
                    guess = np.array(pc.U[index, :])

                    # We'll scale the guess in each pixel by the typical ratio
                    # of this pixel's data to the PCA model for the block:
                    model = np.dot(guess, V)
                    ratio = data[:, jb * pc.blocksize:(jb + 1) * pc.blocksize,
                                 ib * pc.blocksize:(ib + 1) *
                                 pc.blocksize] / model[:, np.newaxis,
                                                       np.newaxis]
                    scale, var, n = clippedMean(ratio, 4, axis=0)
                    logger.debug('Var, scale, ratio shapes: ' + str(var.shape) + \
                                 ' ' + str(scale.shape) + ' ' + str(ratio.shape))

                    del ratio, n
                    # Solve each pixel in the block:
                    for jp in range(pc.blocksize):
                        for ip in range(pc.blocksize):
                            cost = skyinfo.ClippedCost(3 *
                                                       np.sqrt(var[jp, ip]))
                            # Execute and save the fit
                            out[:, yStart + jb * pc.blocksize + jp, ib * pc.blocksize + ip] = \
                              skyinfo.linearFit(data[:, jb * pc.blocksize + jp, ib * pc.blocksize + ip],
                                                V,
                                                guess * scale[jp, ip],
                                                cost)

            del data

        # Save the template into the outfile
        spc = skyinfo.SkyPC(out, detpos)
        spc.save(out_filename)

        logger.debug('Finished sky template')
        ret_code = 0
        return ret_code
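skyinfo.linearFit above solves one pixel's time series against the coefficient
matrix V, with the ClippedCost object rejecting discrepant exposures; its source
is not included here.  A minimal sketch of that style of clipped iterative fit,
with all names hypothetical:

import numpy as np

def clipped_linear_fit(y, A, guess, clip, max_iter=10):
    # Solve y ~ coeffs @ A by least squares, iteratively excluding samples
    # whose absolute residual exceeds `clip`.  A has shape (npc, nimg) and
    # y has shape (nimg,), matching the per-pixel call above.
    coeffs = np.array(guess, dtype=float)
    keep = np.abs(y - coeffs @ A) < clip
    for _ in range(max_iter):
        if not keep.any():
            break  # everything clipped; fall back to the last solution
        coeffs, *_ = np.linalg.lstsq(A[:, keep].T, y[keep], rcond=None)
        new_keep = np.abs(y - coeffs @ A) < clip
        if np.array_equal(new_keep, keep):
            break
        keep = new_keep
    return coeffs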
Example #17
    def __call__(cls, in_filenames, out_filename, npc, reject_rms):
        """
        Perform robust PCA of a collection of MiniDecam images.

        :Parameters:
            - `in_filenames`: list of filenames of exposure mini-sky image
            - `out_filename`: filename for the output PCA
            - `npc`: Number of principal components to retain
            - `reject_rms`: Exclude exposures with fractional RMS above this
        """

        logger.info('Collecting images for PCA')
        if npc > skyinfo.MAX_PC:
            raise skyinfo.SkyError(
                "Requested number of sky pc's {:d} is above MAX_PC".format(
                    npc))
        mm = []  # will hold the data vectors for each exposure
        expnums = []  # Collect the exposure numbers of exposures being used
        data_length = None
        blocksize = None
        hdr = {}
        for f in in_filenames:
            mini = skyinfo.MiniDecam.load(f)
            v = mini.vector()
            if data_length is None:
                data_length = len(v)
            elif len(v) != data_length:
                logger.error('Mismatched sky data vector length in file ' + f)
                raise skyinfo.SkyError(
                    'Mismatched sky data vector length in file ' + f)
            # Also check for agreement of the mini-sky setup
            if blocksize is None:
                blocksize = mini.blocksize
                mask_value = mini.mask_value
                invalid = mini.invalid
                halfS7 = mini.halfS7
                hdr['BAND'] = mini.header['BAND']
                hdr['MINNITE'] = mini.header['NITE']
                hdr['MAXNITE'] = mini.header['NITE']
            else:
                if mini.blocksize != blocksize \
                   or mini.invalid != invalid \
                   or mini.halfS7 != halfS7:
                    logger.error('Mismatched minisky configuration in file ' +
                                 f)
                    raise skyinfo.SkyError(
                        'Mismatched minisky configuration in file ' + f)
                try:
                    # Die if there is a filter mismatch among exposures
                    items_must_match(hdr, mini.header, 'BAND')
                except:
                    return 1
                hdr['MINNITE'] = min(hdr['MINNITE'], mini.header['NITE'])
                hdr['MAXNITE'] = max(hdr['MAXNITE'], mini.header['NITE'])
            mm.append(np.array(v))
            expnums.append(int(mini.header['EXPNUM']))
        m = np.vstack(mm).transpose()
        del mm

        logger.info("Start first PCA cycle")
        U, S, v = process(m, npc)

        pc = skyinfo.MiniskyPC(U,
                               blocksize=blocksize,
                               mask_value=mask_value,
                               invalid=invalid,
                               header=hdr,
                               halfS7=halfS7)

        # Refit each exposure to this PCA
        nexp = len(in_filenames)
        V = np.zeros((nexp, U.shape[1]), dtype=float)
        rms = np.zeros(nexp, dtype=float)
        frac = np.zeros(nexp, dtype=float)
        for i in range(nexp):
            mini.fill_from(m[:, i])
            pc.fit(mini, clip_sigma=3.)
            rms[i] = mini.rms
        use = rms <= reject_rms
        logger.info('Retained {:d} out of {:d} exposures'.format(
            np.count_nonzero(use), nexp))

        # New PCA excluding outliers
        logger.info("Start second PCA cycle")
        U, S, v = process(m[:, use], npc)
        pc.U = U

        pc.save(out_filename)

        # Recollect statistics and save
        logger.info("Collecting statistics")
        for i in range(nexp):
            mini.fill_from(m[:, i])
            pc.fit(mini, clip_sigma=3.)
            V[i, :] = mini.coeffs
            rms[i] = mini.rms
            frac[i] = mini.frac

        # Write V and a results table
        skyinfo.MiniskyPC.save_exposures(out_filename, expnums, V, rms, frac,
                                         use, S)

        logger.debug('Finished PCA')
        ret_code = 0
        return ret_code
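The helper process() is not shown in this collection; from the way U and S are
consumed above, it is presumably an SVD-based PCA.  A plausible stand-in under
that assumption:

import numpy as np

def process_sketch(m, npc):
    # m has shape (data_length, nexp).  Return the first npc left singular
    # vectors, their singular values, and the right singular vectors.
    U, S, VT = np.linalg.svd(m, full_matrices=False)
    return U[:, :npc], S[:npc], VT[:npc, :]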
Example #18
    def __call__(cls, image, overscan_sample, overscan_function,
                 overscan_order, overscan_trim):
        """Apply an overscan correction to a raw DES image

        :Parameters:
            - `image`: the DESImage to determine and apply an ovescan correction

        Applies the correction "in place"
        """

        logger.info('Applying Overscan')
        #        ret_code = bpm_c(image.cstruct, bpm_im.cstruct)
        biassecan = [x - 1 for x in scan_fits_section(image, 'BIASSECA')]
        biassecbn = [x - 1 for x in scan_fits_section(image, 'BIASSECB')]
        print "BIASSECA: ", biassecan
        print "BIASSECB: ", biassecbn
        biassecan[0] += overscan_trim
        biassecan[1] -= overscan_trim
        biassecbn[0] += overscan_trim
        biassecbn[1] -= overscan_trim
        print "BIASSECA (trimmed): ", biassecan
        print "BIASSECB (trimmed): ", biassecbn

        datasecan = [x - 1 for x in scan_fits_section(image, 'DATASECA')]
        datasecbn = [x - 1 for x in scan_fits_section(image, 'DATASECB')]
        datasecn = [x - 1 for x in scan_fits_section(image, 'DATASEC')]
        print "DATASECA: ", datasecan
        print "DATASECB: ", datasecbn
        if (datasecan[2] > datasecan[3]):
            ytemp = datasecan[2]
            datasecan[2] = datasecan[3]
            datasecan[3] = ytemp
        if (datasecbn[2] > datasecbn[3]):
            ytemp = datasecbn[2]
            datasecbn[2] = datasecbn[3]
            datasecbn[3] = ytemp
        print "DATASECA (ordered): ", datasecan
        print "DATASECB (ordered): ", datasecbn
        print "DATASEC  (ordered): ", datasecn

        ampsecan = [x - 1 for x in scan_fits_section(image, 'AMPSECA')]
        ampsecbn = [x - 1 for x in scan_fits_section(image, 'AMPSECB')]
        print "AMPSECA: ", ampsecan
        print "AMPSECB: ", ampsecbn
        #       order x-ranges
        if (ampsecan[0] > ampsecan[1]):
            xtemp = ampsecan[0]
            ampsecan[0] = ampsecan[1]
            ampsecan[1] = xtemp
        if (ampsecbn[0] > ampsecbn[1]):
            xtemp = ampsecbn[0]
            ampsecbn[0] = ampsecbn[1]
            ampsecbn[1] = xtemp
#       order y-ranges
        if (ampsecan[2] > ampsecan[3]):
            ytemp = ampsecan[2]
            ampsecan[2] = ampsecan[3]
            ampsecan[3] = ytemp
        if (ampsecbn[2] > ampsecbn[3]):
            ytemp = ampsecbn[2]
            ampsecbn[2] = ampsecbn[3]
            ampsecbn[3] = ytemp
        print "AMPSECA(ordered): ", ampsecan
        print "AMPSECB(ordered): ", ampsecbn
        #
        #       Obtain/sample the overscan strip(s) to obtain a set of values.
        #
        if (overscan_sample < 0):
            overscan_a = np.median(image.data[biassecan[2]:biassecan[3] + 1,
                                              biassecan[0]:biassecan[1] + 1],
                                   axis=1)
            overscan_b = np.median(image.data[biassecbn[2]:biassecbn[3] + 1,
                                              biassecbn[0]:biassecbn[1] + 1],
                                   axis=1)
        elif (overscan_sample == 0):
            overscan_a = image.data[biassecan[2]:biassecan[3] + 1,
                                    biassecan[0]:biassecan[1] + 1].mean(axis=1)
            overscan_b = image.data[biassecbn[2]:biassecbn[3] + 1,
                                    biassecbn[0]:biassecbn[1] + 1].mean(axis=1)
        elif (overscan_sample > 0):
            overscan_a = (
                image.data[biassecan[2]:biassecan[3] + 1,
                           biassecan[0]:biassecan[1] + 1].sum(axis=1) -
                image.data[biassecan[2]:biassecan[3] + 1,
                           biassecan[0]:biassecan[1] + 1].min(axis=1) -
                image.data[biassecan[2]:biassecan[3] + 1,
                           biassecan[0]:biassecan[1] + 1].max(axis=1)
            ) / float(biassecan[1] - biassecan[0] - 1)
            overscan_b = (
                image.data[biassecbn[2]:biassecbn[3] + 1,
                           biassecbn[0]:biassecbn[1] + 1].sum(axis=1) -
                image.data[biassecbn[2]:biassecbn[3] + 1,
                           biassecbn[0]:biassecbn[1] + 1].min(axis=1) -
                image.data[biassecbn[2]:biassecbn[3] + 1,
                           biassecbn[0]:biassecbn[1] + 1].max(axis=1)
            ) / float(biassecbn[1] - biassecbn[0] - 1)

        print(overscan_a.shape)
        print(overscan_a)
        print(overscan_a.dtype)
        #
        #       If requested, reform the overscan estimate using a fit.
        #
        if (overscan_function < 0):
            #
            #           Spline interpolation of resampled set chosen
            #
            raise NotImplementedError
        elif (overscan_function > 0):
            #
            #           Polynomial fit (using Legendre polynomials) of a resampled set chosen
            #
            raise NotImplementedError
        elif (overscan_function == 0):
            overscan_val_a = overscan_a
            overscan_val_b = overscan_b


        #
        #       Subtract Overscan from the Image data
        #
        print(datasecn[3] - datasecn[2] + 1, datasecn[1] - datasecn[0] + 1)
        newimage = np.empty(
            [datasecn[3] - datasecn[2] + 1, datasecn[1] - datasecn[0] + 1],
            dtype=data_dtype)
        newimage[:, :] = image.data[datasecn[2]:datasecn[3] + 1,
                                    datasecn[0]:datasecn[1] + 1]
        newimage[ampsecan[2]:ampsecan[3] + 1,
                 ampsecan[0]:ampsecan[1] + 1] -= overscan_val_a[:, np.newaxis]
        newimage[ampsecbn[2]:ampsecbn[3] + 1,
                 ampsecbn[0]:ampsecbn[1] + 1] -= overscan_val_b[:, np.newaxis]
        image.data = newimage
        print(image.data.dtype)

        #
        #       Trim data to form output
        #
        #        print image.data.shape
        #        print output.dtype
        #        import fitsio
        #        fitsio.write('outimage',output,clobber=True)
        #        print image.data.shape
        #        image.header['NAXIS1']=datasecn[1]-datasecn[0]+1
        #        image.header['NAXIS2']=datasecn[3]-datasecn[2]+1

        logger.info(' %d %d ' % image.data.shape)
        logger.debug('Finished applying Overscan')
        ret_code = 0
        return ret_code
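The overscan_function != 0 branches above are left unimplemented; for the
polynomial case the comment names Legendre polynomials.  A sketch of what that
fit could look like with numpy's legendre module (the resampling step is
omitted, and this is an assumption, not the pipeline's implementation):

import numpy as np

def legendre_overscan_sketch(overscan, order):
    # Fit the per-row overscan values with a low-order Legendre polynomial
    # so the subtracted level varies smoothly with row number.
    rows = np.arange(len(overscan), dtype=float)
    x = 2. * rows / (len(overscan) - 1) - 1.   # map rows onto [-1, 1]
    coef = np.polynomial.legendre.legfit(x, overscan, order)
    return np.polynomial.legendre.legval(x, coef)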
Example #19
    def __call__(cls, image):
        """
        This is currently written with a single lightbulb in mind.  It may be generalizable if further cases
        occur (but not all the parts have yet been written with a generalized case in mind).
        Three components are generated depending on the strength of the lightbulb:
            1) circular mask centered on the bulb with a radius sufficient to reach the 1-sigma noise level
            2) columnar mask that extends toward the read registers (triggered when the lightbulb has central brightness > XX)
            3) columnar mask that extends away from the read registers (triggered when the lightbulb is saturated)
        """
        #       A simple dictionary with parameters for the only known lightbulb
        #           Currently explist is set to encompass 20170901 and beyond (expnum > 674105)
        #           This could be tightened to 693244 (20171101) or even to 694699 (the earliest weak lightbulb found so far)
        LBD = {46: {'explist': '674105-', 'xc': 795, 'yc': 2620, 'rad': 500}}

        if image['CCDNUM'] in LBD:
            check_for_light = lb.check_lightbulb_explist(
                image['EXPNUM'], LBD[image['CCDNUM']]['explist'])
            if check_for_light:
                logger.info(
                    'Image CCDNUM=46 is in the prescribed range; checking for lightbulb'
                )
                bulbDict = lb.check_lightbulb(image,
                                              LBD[image['CCDNUM']],
                                              verbose=1)
                #
                #               Current criterion:
                #               1) saturated pixels detected, median brightness > 100,000, and width above the lower end of range
                #               2) non-saturated:
                #                   requires a successful fit (failed fits set g_wid, g_widerr, g_amp, g_amperr to -1)
                #                   width (FWHM) in range 70-140 and uncertainty less than 35
                #                   amplitude > 0 and uncertainty positive and less than sqrt(amp)
                #
                isBulb = False
                bulbDict['bulb'] = 'F'
                if bulbDict['num_sat'] > 0:
                    if bulbDict['g_wid'] > 70. and bulbDict['g_amp'] > 100000:
                        bulbDict['bulb'] = 'S'
                        isBulb = True
                if not isBulb:
                    if bulbDict['g_wid'] >= 70. and bulbDict['g_wid'] <= 140. and \
                       bulbDict['g_widerr'] > 0. and bulbDict['g_widerr'] < 35.:
                        if bulbDict['g_amp'] >= 0.:
                            if bulbDict['g_amperr'] > 0. and bulbDict[
                                    'g_amperr'] < np.sqrt(bulbDict['g_amp']):
                                bulbDict['bulb'] = 'T'
                                isBulb = True


                #
                #               If found use masking utility
                #
                if isBulb:
                    logger.info(
                        ' LIGHTBULB: detected with central brightness: {:.1f}'.
                        format(bulbDict['bulb_sb']))
                    image = lb.mask_lightbulb(image,
                                              LBD[image['CCDNUM']],
                                              bulbDict,
                                              verbose=1)
                    image.write_key('DESBULB',
                                    'Peak SB {:.1f}, Radius {:.1f} '.format(
                                        bulbDict['bulb_sb'],
                                        bulbDict['g_wid']),
                                    comment='')

        logger.debug('Finished checking and applying mask for light bulb')
        ret_code = 0
        return ret_code
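lb.check_lightbulb_explist above decides whether EXPNUM falls inside the range
encoded by strings like '674105-'.  Its source is not included; a minimal sketch
of such a check, assuming open-ended 'lo-' and closed 'lo-hi' forms (the real
parser may accept more):

def in_explist_sketch(expnum, explist):
    # '674105-' means expnum >= 674105; '674105-694699' bounds both ends.
    lo, _, hi = explist.partition('-')
    if expnum < int(lo):
        return False
    return hi == '' or expnum <= int(hi)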
Example #20
    def __call__(cls,
                 image,
                 min_cols=DEFAULT_MINCOLS,
                 max_cols=DEFAULT_MAXCOLS,
                 interp_mask=DEFAULT_INTERP_MASK,
                 invalid_mask=DEFAULT_INVALID_MASK):
        """
        Interpolate over selected pixels by inserting the average of the pixels to the left and
        right of any bunch of adjacent selected pixels.  If the interpolation region touches an
        edge, or the adjacent pixel has flags marking it as invalid, then the value at the
        other border is used for interpolation.  No interpolation is done if both
        boundary pixels are invalid.

        :Parameters:
            - `image`: DESImage to fix.
            - `min_cols`: Minimum width of region to be interpolated.
            - `max_cols`: Maximum width of region to be interpolated.
            - `interp_mask`: Mask bits that will trigger interpolation
            - `invalid_mask`: Mask bits invalidating a pixel as interpolation source.
        """

        logger.info('Interpolating along rows')

        if image.mask is None:
            logger.error('Input image does not have mask')
            return 1

        interpolate = np.array(image.mask & interp_mask, dtype=bool)

        # Make arrays noting where a run of bad pixels starts or ends.
        # Then make has_left/has_right arrays saying whether the pixel
        # bordering the run on that side is valid, plus arrays holding
        # the value just to the left/right of the run.
        work = np.array(interpolate)
        work[:, 1:] = np.logical_and(interpolate[:, 1:], ~interpolate[:, :-1])
        ystart, xstart = np.where(work)

        work = np.array(interpolate)
        work[:, :-1] = np.logical_and(interpolate[:, :-1], ~interpolate[:, 1:])
        yend, xend = np.where(work)
        xend += 1  # Make the value one-past-end

        # If we've done this correctly, every run has a start and an end.
        if not np.all(ystart == yend):
            logger.error("Logic problem, ystart and yend not equal.")
            return 1

        # Narrow our list to runs of the desired length range
        use = xend - xstart >= min_cols
        if max_cols is not None:
            use = np.logical_and(xend - xstart <= max_cols, use)
        xstart = xstart[use]
        xend = xend[use]
        ystart = ystart[use]

        # Now determine which runs have valid data at left/right
        xleft = np.maximum(0, xstart - 1)
        has_left = ~np.array(image.mask[ystart, xleft] & invalid_mask,
                             dtype=bool)
        has_left = np.logical_and(xstart >= 1, has_left)
        left_value = image.data[ystart, xleft]

        xright = np.minimum(work.shape[1] - 1, xend)
        has_right = ~np.array(image.mask[ystart, xright] & invalid_mask,
                              dtype=bool)
        has_right = np.logical_and(xend < work.shape[1], has_right)
        right_value = image.data[ystart, xright]

        # Assign right-side value to runs having just right data
        for run in np.where(np.logical_and(~has_left, has_right))[0]:
            image.data[ystart[run], xstart[run]:xend[run]] = right_value[run]
            image.mask[ystart[run],
                       xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP
        # Assign left-side value to runs having just left data
        for run in np.where(np.logical_and(has_left, ~has_right))[0]:
            image.data[ystart[run], xstart[run]:xend[run]] = left_value[run]
            image.mask[ystart[run],
                       xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP

        # Assign mean of left and right to runs having both sides
        for run in np.where(np.logical_and(has_left, has_right))[0]:
            image.data[ystart[run], xstart[run]:xend[run]] = \
              0.5*(left_value[run] + right_value[run])
            image.mask[ystart[run],
                       xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP

        # Add to image history
        image['HISTORY'] = time.asctime(time.localtime()) + \
            ' row_interp over mask 0x{:04X}'.format(interp_mask)

        logger.debug('Finished interpolating')

        ret_code = 0
        return ret_code
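The start/end bookkeeping above is the heart of this routine and is easy to
verify on a small example: the shifted logical-ANDs mark the first pixel of
each run and one past its last pixel.

import numpy as np

bad = np.array([0, 1, 1, 0, 1, 0, 0, 1, 1, 1], dtype=bool)
start = bad.copy()
start[1:] = bad[1:] & ~bad[:-1]       # True at the first pixel of each run
end = bad.copy()
end[:-1] = bad[:-1] & ~bad[1:]        # True at the last pixel of each run
print(np.where(start)[0])             # -> [1 4 7]
print(np.where(end)[0] + 1)           # one-past-end -> [ 3  5 10]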
Example #21
    def __call__(cls,
                 imageIn,
                 imageOut,
                 min_cols=DEFAULT_MINCOLS,
                 max_cols=DEFAULT_MAXCOLS,
                 weight_threshold=DEFAULT_WEIGHT_THRESHOLD,
                 weight_value=DEFAULT_WEIGHT_VALUE):
        """
        Add a mask plane to imageIn and set bits wherever the weight value is <= given
        threshold.  Then interpolate the data plane along columns to replace masked pixels.
        Set weight to weight_value at the masked pixels too.  Masked pixels along edges are
        not interpolated, and are left with input weight.

        :Parameters:
            - `imageIn`: filename of input image
            - `imageOut`: filename of output image
            - `min_cols`: Minimum width of region to be interpolated.
            - `max_cols`: Maximum width of region to be interpolated.
            - `weight_threshold`: Upper bound for weight values to mark as bad
            - `weight_value`: New weight value for bad pixels, enter <0 to use neighbor weights
        """

        logger.info('Preparing coadd {:s} for SExtractor'.format(imageIn))

        # Read weight plane and science plane
        sci, scihdr = fitsio.read(imageIn, ext=0, header=True)
        wt, wthdr = fitsio.read(imageIn, ext=1, header=True)

        # Make mask plane
        mask = wt <= float(weight_threshold)

        # Identify column runs to interpolate, start by marking beginnings of runs
        work = np.array(mask)
        work[1:, :] = np.logical_and(mask[1:, :], ~mask[:-1, :])
        xstart, ystart = np.where(work.T)

        # Now ends of runs
        work = np.array(mask)
        work[:-1, :] = np.logical_and(mask[:-1, :], ~mask[1:, :])
        xend, yend = np.where(work.T)
        yend = yend + 1  # Make the value one-past-end

        # If we've done this correctly, every run has a start and an end, on same col
        if not np.all(xstart == xend):
            logger.error("Logic problem, xstart and xend not equal.")
            return 1

        # Narrow our list to runs of the desired length range and
        # not touching the edges
        use = yend - ystart >= min_cols
        if max_cols is not None:
            use = np.logical_and(yend - ystart <= max_cols, use)
        use = np.logical_and(ystart > 0, use)
        use = np.logical_and(yend < mask.shape[0], use)
        ystart = ystart[use]
        yend = yend[use]
        xstart = xstart[use]

        # Assign mean of top and bottom to runs, and fill in weight plane
        for run in range(len(xstart)):
            sci[ystart[run]:yend[run], xstart[run]] = \
              0.5 * (sci[ystart[run] - 1, xstart[run]] +
                     sci[yend[run], xstart[run]])
            if weight_value < 0:
                fill_weight = 0.5 * (wt[ystart[run] - 1, xstart[run]] \
                                     + wt[yend[run], xstart[run]])
            else:
                fill_weight = weight_value
            wt[ystart[run]:yend[run], xstart[run]] = fill_weight

        # Add to image history
        scihdr['HISTORY'] = time.asctime(time.localtime()) + \
            ' coadd_prepare with weight threshold {:f}'.format(weight_threshold)

        # Write out all three planes
        mask = np.array(mask, dtype=np.int16) * cls.MASK_VALUE
        logger.debug('Writing output images')
        with fitsio.FITS(imageOut, mode=fitsio.READWRITE, clobber=True) as ff:
            ff.write(sci, extname='SCI', header=scihdr)
            ff.write(mask, extname='MSK')
            ff.write(wt, extname='WGT', header=wthdr)

        logger.debug('Finished coadd_prepare')

        ret_code = 0
        return ret_code
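The interpolation itself is just the mean of the valid pixels bracketing each
vertical run; a one-column illustration using the same one-past-end convention:

import numpy as np

col = np.array([10., 0., 0., 0., 14.])   # science values down one column
wt = np.array([1., 0., 0., 0., 1.])      # weights; zeros mark bad pixels
ystart, yend = 1, 4                      # bad run, one-past-end convention
col[ystart:yend] = 0.5 * (col[ystart - 1] + col[yend])
print(col)                               # -> [10. 12. 12. 12. 14.]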
Example #22
    def __call__(cls, image, bpm_im, saturate, clear):
        """Create or update the mask plane of an image

        :Parameters:
            - `image`: the DESImage to operate upon.  Mask plane is created if absent
            - `bpm_im`: the DESBPMImage with the bad pixel mask. Skips BPM step if None
            - `saturate`: boolean flag indicating whether to set BADPIX_SATURATE flags
            - `clear`: if True, clear the pre-existing mask.  If False, OR new bits into the old mask.

        """

        if image.mask is None:
            image.init_mask()
        elif clear:
            image.mask.fill(0)

        ret_code = 0
        if bpm_im is None and saturate is False:
            logger.warning('Null operation requested in make_mask')
            return ret_code

        #Flag saturated pixels first, if requested
        if saturate:
            # Check for header keyword of whether it's been done
            kw = 'DESSAT'
            if kw in image.header.keys() and not clear:
                logger.warning('Skipping saturation check (' + kw +
                               ' already set)')
            else:
                logger.info('Flagging saturated pixels')
                nsat = 0
                for amp in decaminfo.amps:
                    sec = section2slice(image['DATASEC' + amp])
                    sat = image['SATURAT' + amp]
                    satpix = image.data[sec] >= sat
                    image.mask[sec][satpix] |= BADPIX_SATURATE
                    nsat += np.count_nonzero(satpix)

                image.write_key(kw,
                                time.asctime(time.localtime()),
                                comment='Flag saturated pixels')
                image.write_key('NSATPIX',
                                nsat,
                                comment='Number of saturated pixels')

                logger.debug('Finished flagging saturated pixels')

        #Now fill in BPM
        if bpm_im is not None:
            # Check for header keyword of whether it's been done
            kw = 'DESBPM'
            if kw in image.header.keys() and not clear:
                logger.warning('Skipping BPM application (' + kw +
                               ' already set)')
            else:
                logger.info('Applying BPM')
                try:
                    items_must_match(image, bpm_im, 'CCDNUM')
                except:
                    return 1

                #====Temporary kluge until we get the new BPMS
                #Replace CORR with BIAS_COL
                #bitmask = BPMDEF_CORR
                #mark = (bpm_im.mask & bitmask) != 0
                #bpm_im.mask[mark] |= BPMDEF_BIAS_COL
                # Clear correctable bits from BPM if any are already set
                #bpm_im.mask -= (bpm_im.mask & BPMDEF_CORR)
                #====End kluge

                # Map the following BPM bits to BADPIX_BPM in the image mask
                bitmask = BPMDEF_FLAT_MIN | \
                    BPMDEF_FLAT_MAX | \
                    BPMDEF_FLAT_MASK | \
                    BPMDEF_BIAS_HOT | \
                    BPMDEF_BIAS_WARM | \
                    BPMDEF_BIAS_MASK | \
                    BPMDEF_BIAS_COL | \
                    BPMDEF_FUNKY_COL | \
                    BPMDEF_WACKY_PIX
                # ERICM Removed BPMDEF_CORR and added FUNKY_COL to the above list
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_BPM

                # Copy BPM edge pixels to image mask
                bitmask = BPMDEF_EDGE
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_EDGE

                # Copy bad amplifier bits to image mask
                bitmask = BPMDEF_BADAMP
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_BADAMP

                # Copy SUSPECT BPM bits to image mask
                bitmask = BPMDEF_SUSPECT
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_SUSPECT

                # Copy NEAREDGE BPM bits to image mask
                bitmask = BPMDEF_NEAREDGE
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_NEAREDGE

                # Copy TAPEBUMP BPM bits to image mask
                bitmask = BPMDEF_TAPEBUMP
                mark = (bpm_im.mask & bitmask) != 0
                image.mask[mark] |= BADPIX_TAPEBUMP

                # Mark correctable pixels.
                # Pixels flagged as BPMDEF_BIAS_COL and BPMDEF_FUNKY_COL may be correctable.
                # We flag them in the image as bad (BADPIX_BPM), but - if fix_columns is run,
                # the BADPIX_BPM flag will be cleared and the BADPIX_FIX flag will be set
                # For each column find the number of pixels flagged as BIAS_HOT and BIAS_COL
                N_BIAS_HOT = np.sum((bpm_im.mask & BPMDEF_BIAS_HOT) > 0,
                                    axis=0)
                N_BIAS_COL = np.sum((bpm_im.mask & BPMDEF_BIAS_COL) > 0,
                                    axis=0)
                maskwidth = bpm_im.mask.shape[1]
                # First do columns with N_BIAS_COL set for 1 or more pixels
                biascols = np.arange(maskwidth)[(N_BIAS_COL > 0)]
                for icol in biascols:
                    #Clear FUNKY_COL bit if set for all pixels in this column
                    #The reason for clearing the bit is that the FUNKY_COL detection is
                    #sensitive to hot bias pixels and may flag those columns by "mistake"
                    #First clear BAD BPM bit if set because of funky column
                    image.mask[:, icol][bpm_im.mask[:, icol] ==
                                        BPMDEF_FUNKY_COL] &= ~BADPIX_BPM
                    bpm_im.mask[:, icol] -= (bpm_im.mask[:, icol]
                                             & BPMDEF_FUNKY_COL)
                    #Correctable columns have exactly 1 BIAS_HOT pixel
                    if N_BIAS_HOT[icol] == 1:
                        #Correctable pixels have BIAS_COL bit set
                        bpm_im.mask[:, icol][(
                            bpm_im.mask[:, icol]
                            & BPMDEF_BIAS_COL) > 0] |= BPMDEF_CORR
                        logger.info('Column ' + str(icol) +
                                    ' has 1 hot pixel and is correctable.')
                    else:
                        logger.info('Column ' + str(icol) + ' has ' +
                                    str(N_BIAS_HOT[icol]) +
                                    ' hot pixels and is NOT correctable.')

                #Now do columns with FUNKY_COL set.  Note that the FUNKY_COL bits have been cleared above
                #for hot bias columns
                N_FUNKY_COL = np.sum((bpm_im.mask & BPMDEF_FUNKY_COL) > 0,
                                     axis=0)
                funkycols = np.arange(maskwidth)[(N_FUNKY_COL > 0)]
                for icol in funkycols:
                    #Correctable pixels have FUNKY_COL bit set
                    bpm_im.mask[:,
                                icol][(bpm_im.mask[:, icol]
                                       & BPMDEF_FUNKY_COL) > 0] |= BPMDEF_CORR
                    logger.info('Column ' + str(icol) +
                                ' is funky and correctable.')

                image.write_key(kw,
                                time.asctime(time.localtime()),
                                comment='Construct mask from BPM')
                if bpm_im.sourcefile is None:
                    image.write_key('BPMFIL',
                                    'UNKNOWN',
                                    comment='BPM file used to build mask')
                else:
                    image.write_key('BPMFIL',
                                    path.basename(bpm_im.sourcefile),
                                    comment='BPM file used to build mask')

                logger.debug('Finished applying BPM')

        return ret_code
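The straight bit copies above (EDGE, BADAMP, SUSPECT, NEAREDGE, TAPEBUMP) all
follow one pattern and could be collapsed into a table-driven loop; a sketch,
assuming the same bit constants are in scope:

BPM_TO_BADPIX = [(BPMDEF_EDGE, BADPIX_EDGE),
                 (BPMDEF_BADAMP, BADPIX_BADAMP),
                 (BPMDEF_SUSPECT, BADPIX_SUSPECT),
                 (BPMDEF_NEAREDGE, BADPIX_NEAREDGE),
                 (BPMDEF_TAPEBUMP, BADPIX_TAPEBUMP)]
for bpm_bit, badpix_bit in BPM_TO_BADPIX:
    image.mask[(bpm_im.mask & bpm_bit) != 0] |= badpix_bit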
Example #23
    def __call__(cls, image, fit_filename, pc_filename, weight, dome,
                 skymodel_filename):
        """
        Subtract sky from image using previous principal-components fit. Optionally
        build weight image from fitted sky or all counts, in which case the dome flat
        is needed and proper gain values are expected in the image header.

        :Parameters:
            - `image`: DESImage that has been flattened with dome already and fit
            - `fit_filename`: filename with the coefficients from minisky fitting.  Sky
                              subtraction is skipped if this is None.
            - `pc_filename`: filename for the stored full-res sky principal components
            - `weight`: 'none' to skip weights, 'sky' to calculate weight at sky level,
                         'all' to use all counts
            - `dome`: DESImage for the dome flat, needed if weight is not 'none'.
            - `skymodel_filename`: optional output filename for 'sky'
        """

        if weight == 'sky' and fit_filename is None:
            raise SkyError(
                'Cannot make sky-only weight map without doing sky subtraction'
            )

        if fit_filename is not None:
            logger.info('Subtracting sky')
            mini = skyinfo.MiniDecam.load(fit_filename)
            templates = skyinfo.SkyPC.load(pc_filename)
            if templates.detpos != image['DETPOS']:
                # Quit if we don't have the right CCD to subtract
                logger.error(
                    'Image DETPOS {:s} does not match sky template {:s}'.
                    format(templates.detpos, image['DETPOS']))
                return 1
            try:
                image['BAND']
            except:
                image['BAND'] = decaminfo.get_band(image['FILTER'])
            try:
                items_must_match(image, mini.header, 'BAND', 'EXPNUM')
                items_must_match(image, templates.header, 'BAND')
                # ??? Could check that template and image use same dome flat
            except:
                return 1
            sky = templates.sky(mini.coeffs)
            image.data -= sky
            image.write_key('SKYSBFIL',
                            path.basename(pc_filename),
                            comment='Sky subtraction template file')
            for i, c in enumerate(mini.coeffs):
                image.write_key('SKYPC{:>02d}'.format(i),
                                c,
                                comment='Sky template coefficient')
            logger.info('Finished sky subtraction')
            #
            #           Optionally write the sky model that was subtracted from the image.
            #
            if skymodel_filename is not None:
                # Create HDU for output skymodel, add some header info, save output to file
                logger.info('Optional output of skymodel requested')
                skymodel_image = DESDataImage(sky)
                skymodel_image.write_key(
                    'SKYSBFIL',
                    path.basename(pc_filename),
                    comment='Sky subtraction template file')
                for i, c in enumerate(mini.coeffs):
                    skymodel_image.write_key(
                        'SKYPC{:>02d}'.format(i),
                        c,
                        comment='Sky template coefficient')
                skymodel_image.write_key('BAND', image['BAND'], comment='Band')
                skymodel_image.write_key('EXPNUM',
                                         image['EXPNUM'],
                                         comment='Exposure Number')
                skymodel_image.write_key('CCDNUM',
                                         image['CCDNUM'],
                                         comment='CCD Number')
                skymodel_image.write_key('NITE',
                                         image['NITE'],
                                         comment='Night')
                #               skymodel_image.copy_header_info(image, cls.propagate, require=False)
                ## ?? catch exception from write error below?
                skymodel_image.save(skymodel_filename)

        else:
            sky = None

        if weight == 'none':
            do_weight = False
            sky_weight = False
        elif weight == 'sky':
            do_weight = True
            sky_weight = True
        elif weight == 'all':
            do_weight = True
            sky_weight = False
        else:
            raise SkyError('Invalid weight value: ' + weight)

        if do_weight:
            if dome is None:
                raise SkyError(
                    'sky_subtract needs dome flat when making weights')

            if sky_weight:
                logger.info('Constructing weight image from sky image')
                data = sky
            else:
                logger.info('Constructing weight image from all counts')
                if sky is None:
                    # If we did not subtract a sky, the image data gives total counts
                    data = image.data
                else:
                    # Add sky back in to get total counts
                    data = image.data + sky

            if image.weight is not None or image.variance is not None:
                image.weight = None
                image.variance = None
                logger.warning('Overwriting existing weight image')
            """
            We assume in constructing the weight (=inverse variance) image that
            the input image here has been divided by the dome flat already, and that
            its GAIN[AB] keywords are correct for a pixel that has been divided
            by the FLATMED[AB] of the flat image.  So the number of *electrons* that
            were read in a pixel whose current value=sky is
            e = sky * (dome/FLATMED) * GAIN

            The variance has three parts: read noise, sky Poisson noise, and
            multiplicative errors from noise in the flat field.
            The read noise variance, in electrons, is
            Var = RDNOISE^2
            ...and the shot noise from sky was, in electrons,
            Var = sky * (dome/FLATMED) * GAIN

            This means the total variance in the image, in its present form, is

            Var = (RDNOISE * FLATMED / dome / GAIN)^2 + (FLATMED/GAIN)*sky/dome

            We can also add the uncertainty propagated from shot noise in the dome flat,
            if the dome image has a weight or variance.  In which case we would add

            Var += var(dome) * sky^2 / dome^2

            (remembering that sky has already been divided by the dome).

            If sky_weight = False, we can substitute the image data for sky in the above
            calculations.
            """

            # Transform the sky image into a variance image
            var = np.array(data, dtype=weight_dtype)
            for amp in decaminfo.amps:
                sec = section2slice(image['DATASEC' + amp])
                invgain = (image['FLATMED' + amp] /
                           image['GAIN' + amp]) / dome.data[sec]
                var[sec] += image['RDNOISE' + amp]**2 * invgain
                var[sec] *= invgain
            # Add noise from the dome flat shot noise, if present
            if dome.weight is not None:
                var += data * data / (dome.weight * dome.data * dome.data)
            elif dome.variance is not None:
                var += data * data * dome.variance / (dome.data * dome.data)

            image.variance = var

            # Now there are statistics desired for the output image header.
            # First, the median variance at sky level on the two amps, SKYVAR[AB]
            meds = []
            for amp in decaminfo.amps:
                sec = section2slice(image['DATASEC' + amp])
                v = np.median(var[sec][::4, ::4])
                image.write_key(
                    'SKYVAR' + amp,
                    v,
                    comment='Median noise variance at sky level, amp ' + amp)
                meds.append(v)
            # SKYSIGMA is overall average noise level
            image.write_key('SKYSIGMA',
                            np.sqrt(np.mean(meds)),
                            comment='RMS noise at sky level')
            # SKYBRITE is a measure of sky brightness.  Use the sky image if we've got it, else
            # use the data
            if sky is None:
                skybrite = np.median(data[::4, ::4])
            else:
                skybrite = np.median(sky[::2, ::2])
            image.write_key('SKYBRITE',
                            skybrite,
                            comment='Median sky brightness')

            logger.debug('Finished weight construction')

            # Run null_mask or resaturate if requested in the command-line
            if cls.do_step('null_mask') or cls.do_step('resaturate'):
                logger.info("Running null_weights")
                # We need to fix the step_name if we want to call 'step_run'
                null_weights.__class__.step_name = config_section
                #null_weights.__class__.step_name = cls.config_section
                null_weights.step_run(image, cls.config)

        ret_code = 0
        return ret_code
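The two-step loop that builds the variance (add the read-noise term, then
multiply by invgain) matches the docstring formula; a single-pixel numeric
check with made-up values:

import numpy as np

sky, dome, flatmed, gain, rdnoise = 2000., 1.02, 1.0, 4.0, 7.0
invgain = (flatmed / gain) / dome        # ADU per electron after flattening
var_doc = (rdnoise * invgain)**2 + sky * invgain   # docstring form
var_loop = (sky + rdnoise**2 * invgain) * invgain  # form used in the loop
assert np.isclose(var_doc, var_loop)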
Example #24
    def _doit(cls, image, flat_im):
        """Apply a flat field correction to an image - used for both dome
        and star flats.

        :Parameters:
            - `image`: the DESImage to apply a flat correction to
            - `flat_im`:  the flat correction image to apply

        Applies the correction "in place"
        """
        logger.info('Applying Flat')

        # Check that flat and data are from same CCD and filter
        try:
            image['BAND']
        except:
            # Give image a BAND from its FILTER if it's not there
            image['BAND'] = decaminfo.get_band(image['FILTER'])
        try:
            items_must_match(image, flat_im, 'CCDNUM', 'BAND')
        except:
            return 1

        # Apply flat to the data
        image.data /= flat_im.data

        # Update variance or weight image if it exists
        if image.weight is not None:
            image.weight *= flat_im.data * flat_im.data
        if image.variance is not None:
            image.variance /= flat_im.data * flat_im.data

        # If mask image exists, mark as BADPIX_BPM any pixels that have
        # non-positive flat and are not already flagged.
        if image.mask is not None:
            # Find flat-field pixels that are invalid but not already bad for
            # one of these reasons:
            badmask = maskbits.BADPIX_BPM +\
                maskbits.BADPIX_BADAMP +\
                maskbits.BADPIX_EDGE
            badflat = np.logical_and(flat_im.data <= 0.,
                                     (image.mask & badmask) == 0)
            image.mask[badflat] |= maskbits.BADPIX_BPM

        # If a weight or variance image already exists, add to it any additional
        # variance from the flat:
        if (image.weight is not None or image.variance is not None):
            if flat_im.weight is not None:
                var = image.get_variance()
                f2 = flat_im.data * flat_im.data
                var *= f2
                var += image.data * image.data / (flat_im.weight * f2)
            elif flat_im.variance is not None:
                var = image.get_variance()
                f2 = flat_im.data * flat_im.data
                var *= f2
                var += image.data * image.data * flat_im.variance / f2

        # Update header keywords for rescaling
        saturate = 0.
        scales = []
        for amp in decaminfo.amps:
            # Acquire the typical scaling factor for each amp from the flat
            scalekw = 'FLATMED' + amp
            if scalekw in flat_im.header.keys():
                # Already stored in the flat's header:
                scale = flat_im[scalekw]
            else:
                # Figure it out ourselves from median of a subsample:
                #                sec = DESImage.section2slice(image['DATASEC'+amp])
                sec = section2slice(image['DATASEC' + amp])
                scale = np.median(flat_im.data[sec][::4, ::4])
            scales.append(scale)
            if scalekw in image.header.keys():
                # Add current scaling to any previous ones
                image[scalekw] = image[scalekw] * scale
            else:
                image[scalekw] = scale
            image['GAIN' + amp] = image['GAIN' + amp] * scale
            image['SATURAT' + amp] = image['SATURAT' + amp] / scale
            # Scale the SKYVAR if it's already here
            kw = 'SKYVAR' + amp
            if kw in image.header.keys():
                image[kw] = image[kw] / (scale * scale)
            saturate = max(saturate, image['SATURAT' + amp])
        # The SATURATE keyword is assigned to maximum of the amps' values.
        image['SATURATE'] = saturate

        # Some other keywords that we will adjust crudely with mean rescaling
        # if they are present:
        scale = np.mean(scales)
        for kw in ('SKYBRITE', 'SKYSIGMA'):
            if kw in image.header.keys():
                image[kw] = image[kw] / scale

        logger.debug('Finished applying Flat')
        ret_code = 0
        return ret_code
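A quick consistency check on the rescaling bookkeeping above: dividing the data
by a flat whose typical level is `scale` multiplies the effective gain (e-/ADU)
by scale and divides the saturation level (ADU) by it, so saturation expressed
in electrons is unchanged (illustrative numbers):

import numpy as np

gain, saturate, scale = 4.0, 60000., 1.25
assert np.isclose((gain * scale) * (saturate / scale), gain * saturate)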
Example #25
    def __call__(cls,
                 in_filename,
                 out_filename,
                 ccdnum,
                 input_template=None,
                 input_list=None,
                 good_filename=None,
                 reject_rms=None,
                 mem_use=8.,
                 bitmask=skyinfo.DEFAULT_SKYMASK):
        """
        Create full-resolution sky templates based on previous PCA.
        Does this pixel by pixel, via robust fitting of the data in the input
        full-res images to the PCA coefficients.  The full-res input images' filenames
        are determined from the EXPNUM _either_ by Python formatting of the string
        given in `input_template` _or_ by looking up the expnum, filename pairs
        in the file specified by `input_list`.
        Output FITS image has an extension NGOOD giving number of
        images used in fit at each pixel.

        :Parameters:
            - `in_filename`: the file holding the PCA outputs on compressed sky
            - `out_filename`: filename for the output template
            - `ccdnum`: which CCD to produce templates for
            - `input_template`: string that can be formatted with the expnum to yield
                            filename of the DESImage holding the full-res data.
            - `input_list`: name of a file containing expnum, filename pairs, one pair per
                            line, separated by whitespace.
            - `good_filename`: Name of a FITS file in which to save number of images
                            contributing to each pixel's fit.  No output if None.
            - `reject_rms`: Exclude exposures with fractional RMS residual sky above this.
                            If this is None, just uses the exposures that PCA used.
            - `mem_use`: Number of GB to target for memory usage (Default = 8)
            - `bitmask`: Applied to the MASK extension of images for initial
                            bad-pixel exclusion.
        """

        logger.info('Starting sky template construction')

        # Need exactly one of these two arguments:
        if not ((input_template is None) ^ (input_list is None)):
            logger.error(
                'Need exactly one of input_template and input_list to be given'
            )
            return 1

        # Acquire PCA information, including the table of info on input exposures
        pc = skyinfo.MiniskyPC.load(in_filename)
        pctab = skyinfo.MiniskyPC.get_exposures(in_filename)

        # Build a MiniDECam that has our choice of CCDs that we can use for indexing.
        mini = pc.get_pc(0)
        # Quit if we are requesting template for a CCD that was not compressed
        detpos = decaminfo.detpos_dict[ccdnum]
        try:
            mini.index_of(detpos, 1, 1)
        except skyinfo.SkyError:
            logger.error('Template requested for CCDNUM not included in PCA')
            return 1

        # Select exposures we'll use
        if reject_rms is None:
            # If no RMS threshold is specified, use the same exposures
            # that were kept during PCA of compressed skies
            use = np.array(pctab['USE'])
        else:
            # Choose our own threshold
            use = pctab['RMS'] < reject_rms
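        # `use` is a boolean flag per row of pctab; only exposures with
        # use=True enter the per-pixel fits below.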

        # Get filenames for the full-res images from list:
        if input_list is not None:
            filenames = {}
            flist = np.loadtxt(input_list, dtype=str)
            for expnum, filename in flist:
                filenames[int(expnum)] = filename
            del flist
            # Warn about missing expnums and drop them from the usable list
            for i, val in enumerate(use):
                if val and int(pctab['EXPNUM'][i]) not in filenames:
                    use[i] = False
                    logger.warning('No input filename given for expnum ' +
                                   str(pctab['EXPNUM'][i]))

        nimg = np.count_nonzero(use)

        expnums = []
        vv = []
        for i, val in enumerate(use):
            if val:
                vv.append(pctab['COEFFS'][i])
                expnums.append(pctab['EXPNUM'][i])
        V = np.vstack(vv)
        del vv

        # We'll re-normalize each exposure, and its coefficients, by V[0]
        norms = np.array(V[:, 0])
        V = V.T / norms  # V is now of shape (npc,nimg)

        # The linear solutions will require this:
        ainv = np.linalg.inv(np.dot(V, V.T))
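        # ainv is the inverse of the normal matrix V V^T; the least-squares
        # coefficients for pixel data y are then ainv . (V . y), which is how
        # dsoln is computed in the clipping loop below.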

        nexp = V.shape[1]
        npc = pc.U.shape[1]
        ySize = decaminfo.shape[0]
        xSize = decaminfo.shape[1]

        # Create the output array
        out = np.zeros((npc, ySize, xSize), dtype=np.float32)

        # And an array to hold the number of exposures used at each pixel:
        if good_filename is not None:
            ngood = np.zeros((ySize, xSize), dtype=np.int16)

        # Decide how many rows of blocks we'll read from files at a time
        # (4 bytes per float32 pixel, held for all nimg images at once):
        bytes_per_row = 4 * xSize * pc.blocksize * nimg
        xBlocks = xSize // pc.blocksize
        yBlocks = min(int(np.floor(0.8 * mem_use * (2**30) / bytes_per_row)),
                      ySize // pc.blocksize)

        if yBlocks < 1:
            logger.warning(
                'Proceeding even though mem_use is not enough to store 1 row of blocks'
            )
            yBlocks = 1

        d = {'ccd': ccdnum}  # A dictionary used to assign names to files
        hdr = {}  # A dictionary of information to go into output image header
        # A mask of zero is equivalent to no masking:
        if bitmask == 0:
            bitmask = None

        nonConvergentBlocks = 0  # Keep count of blocks where clipping does not converge.

        # Collect input data in chunks of yBlocks rows of blocks, then process one block at a time.
        for yStart in range(0, ySize, yBlocks * pc.blocksize):
            # Acquire the pixel data into a 3d array
            yStop = min(ySize, yStart + yBlocks * pc.blocksize)
            logger.info('Working on rows {:d} -- {:d}'.format(yStart, yStop))
            data = np.zeros((nimg, yStop - yStart, xSize), dtype=np.float32)
            # Mask image:
            mask = np.zeros((nimg, yStop - yStart, xSize), dtype=bool)
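            # mask is True where a pixel is usable (none of the bitmask bits set)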

            for i, expnum in enumerate(expnums):
                d['expnum'] = expnum
                if input_template is None:
                    # Get the filename from the input list
                    filename = filenames[expnum]
                else:
                    # Get the filename from formatting the template
                    filename = input_template.format(**d)
                logger.debug('Getting pixels from ' + filename)
                with fitsio.FITS(filename) as fits:
                    data[i, :, :] = fits['SCI'][yStart:yStop, :xSize]
                    if bitmask is None:
                        mask[i, :, :] = True
                    else:
                        m = np.array(fits['MSK'][yStart:yStop, :xSize],
                                     dtype=np.int16)
                        mask[i, :, :] = (m & bitmask) == 0
                        del m
                    if yStart == 0:
                        # First time through the images we will be collecting/checking
                        # header information from the contributing images
                        hdrin = fits['SCI'].read_header()
                        usehdr = {}
                        if 'BAND' in hdrin.keys():
                            usehdr['BAND'] = hdrin['BAND']
                        elif 'FILTER' in hdrin.keys():
                            usehdr['BAND'] = decaminfo.get_band(
                                hdrin['FILTER'])
                        else:
                            logger.error('No BAND or FILTER in ' + filename)
                            return 1
                        if 'NITE' in hdrin.keys():
                            usehdr['NITE'] = hdrin['NITE']
                        elif 'DATE-OBS' in hdrin.keys():
                            usehdr['NITE'] = decaminfo.get_nite(
                                hdrin['DATE-OBS'])
                        else:
                            logger.error('No NITE or DATE-OBS in ' + filename)
                            return 1
                        if 'FLATFIL' in hdrin.keys():
                            usehdr['FLATFIL'] = hdrin['FLATFIL']
                        else:
                            logger.error('No FLATFIL in ' + filename)
                            return 1
                        if 'CCDNUM' in hdrin.keys():
                            usehdr['CCDNUM'] = hdrin['CCDNUM']
                        else:
                            logger.error('No CCDNUM in ' + filename)
                            return 1
                        if not hdr:
                            # First exposure will establish values for the output
                            hdr['BAND'] = usehdr['BAND']
                            hdr['MINNITE'] = usehdr['NITE']
                            hdr['MAXNITE'] = usehdr['NITE']
                            hdr['CCDNUM'] = usehdr['CCDNUM']
                            if hdr['CCDNUM'] != ccdnum:
                                logger.error(
                                    'Wrong ccdnum {:d} in {:s}'.format(
                                        hdr['CCDNUM'], filename))
                                return 1
                            hdr['FLATFIL'] = usehdr['FLATFIL']
                        else:
                            # Check that this exposure matches the others
                            try:
                                items_must_match(hdr, usehdr, 'BAND', 'CCDNUM',
                                                 'FLATFIL')
                            except:
                                return 1
                            hdr['MINNITE'] = min(hdr['MINNITE'],
                                                 usehdr['NITE'])
                            hdr['MAXNITE'] = max(hdr['MAXNITE'],
                                                 usehdr['NITE'])

            # Apply norms so each exposure's data is near unity
            data /= norms[:, np.newaxis, np.newaxis]

            # Now cycle through all blocks
            for jb in range((yStop - yStart) // pc.blocksize):
                for ib in range(xSize // pc.blocksize):
                    logger.debug('Fitting for block ({:d},{:d})'.format(
                        jb + yStart // pc.blocksize, ib))
                    if ccdnum == decaminfo.ccdnums['S7'] and \
                       pc.halfS7 and \
                       ib >= xSize // pc.blocksize // 2:
                        # If we are looking at the bad amp of S7, we'll just
                        # store the median of the normalized images in PC0.
                        # The other PC's stay at zero.
                        out[0,
                            yStart + jb * pc.blocksize: yStart + (jb + 1) * pc.blocksize,
                            ib * pc.blocksize: (ib + 1) * pc.blocksize] = \
                            np.median(data[:,
                                           jb * pc.blocksize: (jb + 1) * pc.blocksize,
                                           ib * pc.blocksize: (ib + 1) * pc.blocksize],
                                      axis=0)
                        continue

                    # Use PCA of this block as starting guess at solution
                    index = mini.index_of(detpos, yStart // pc.blocksize + jb,
                                          ib)
                    guess = np.array(pc.U[index, :])

                    # Extract the data for this block into (nexp,npix) array
                    block = np.array(
                        data[:, jb * pc.blocksize:(jb + 1) * pc.blocksize,
                             ib * pc.blocksize:(ib + 1) * pc.blocksize])
                    block.resize(nexp, pc.blocksize * pc.blocksize)

                    bmask = np.array(
                        mask[:, jb * pc.blocksize:(jb + 1) * pc.blocksize,
                             ib * pc.blocksize:(ib + 1) * pc.blocksize])
                    bmask.resize(nexp, pc.blocksize * pc.blocksize)

                    # We'll scale the guess in each pixel by the typical ratio
                    # of this pixel's data to the PCA model for the block, and
                    # also estimate noise as dispersion about this guess
                    model = np.dot(guess, V)
                    ratio = block / model[:, np.newaxis]
                    scale, var, n = clippedMean(ratio, 4, axis=0)
                    clip = 3. * np.sqrt(var.data) * scale.data
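                    # clip is roughly 3 sigma in normalized-data units:
                    # sqrt(var) is the per-pixel dispersion of the data/model
                    # ratio, and scale (the clipped mean ratio) stands in for
                    # the model level, which is near unity after normalization.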

                    # First guess at solution is the outer product of superblock PCA
                    # with the scaling per pixel
                    soln = guess[:, np.newaxis] * scale.data
                    del scale, var, ratio, n

                    # Linear solution with clipping iteration
                    MAX_ITERATIONS = 20
                    TOLERANCE = 0.0001
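                    # Each iteration fits a coefficient update to the residuals
                    # of the unclipped pixels (sigma-clipped linear least
                    # squares) and stops once the largest model change drops
                    # below TOLERANCE.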
                    for i in range(MAX_ITERATIONS):
                        model = np.dot(V.T, soln)
                        # Residuals from model are used to clip
                        resid = block - model
                        # Find clipped points and masked ones
                        good = np.logical_and(resid < clip, resid > -clip)
                        good = np.logical_and(good, bmask)
                        # Set residual to zero at bad pixels
                        resid[~good] = 0.
                        # Get shift in linear solution from residuals:
                        dsoln = np.dot(ainv, np.dot(V, resid))
                        soln += dsoln
                        # Calculate largest change in model as convergence criterion
                        shift = np.max(np.abs(np.dot(V.T, dsoln)))
                        logger.debug('Iteration {:d}, model shift {:f}'.format(
                            i, shift))
                        if shift < TOLERANCE:
                            break
                        if i == MAX_ITERATIONS - 1:
                            nonConvergentBlocks += 1

                    # Save results into big matrices
                    soln.resize(npc, pc.blocksize, pc.blocksize)
                    out[:, yStart + jb * pc.blocksize:yStart +
                        (jb + 1) * pc.blocksize,
                        ib * pc.blocksize:(ib + 1) * pc.blocksize] = soln
                    if good_filename is not None:
                        # Count good pixels along the exposure axis; np.ma is
                        # used because np.ma.count_masked can count along an
                        # axis (entries where `good` is True are the "masked"
                        # ones being counted here):
                        nblock = np.ma.count_masked(
                            np.ma.masked_array(np.zeros_like(good), good), axis=0)
                        nblock.resize(pc.blocksize, pc.blocksize)
                        ngood[yStart + jb * pc.blocksize:yStart +
                              (jb + 1) * pc.blocksize, ib *
                              pc.blocksize:(ib + 1) * pc.blocksize] = nblock
                        del nblock
                    del resid, model, good, dsoln, block
            del data

        if nonConvergentBlocks > 0:
            logger.warning(
                'Clipping did not converge for {:d} blocks out of {:d}'.format(
                    nonConvergentBlocks, xBlocks * (ySize // pc.blocksize)))

        # Add a history line about creation here
        hdr['HISTORY'] = time.asctime(time.localtime()) + \
            ' Build sky template from PCA file {:s}'.format(path.basename(in_filename))
        # Save the template into the outfile
        spc = skyinfo.SkyPC(out, detpos, header=hdr)
        spc.save(out_filename)
        del out

        # Save the number of good sky pixels in another extension
        if good_filename is not None:
            gimg = DESDataImage(ngood,
                                header={
                                    'DETPOS': detpos,
                                    'CCDNUM': ccdnum
                                })
            logger.debug('Writing ngood to ' + good_filename)
            gimg.save(good_filename)
            del gimg, ngood

        logger.debug('Finished sky template')
        ret_code = 0
        return ret_code
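# A minimal usage sketch (hypothetical: assumes this __call__ is exposed on a
# step class, here called SkyTemplate, and that the named files exist):
#
#     ret = SkyTemplate()('mini_pca.fits', 'sky_template_c03.fits', 3,
#                         input_template='D{expnum:08d}_c{ccd:02d}.fits',
#                         good_filename='ngood_c03.fits')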