def mask_cti(image, CTI, ctiDict, verbose=0):
    """Function to mask the amplifier region affected by CTI"""
    if ctiDict['isCTI']:
        sec = section2slice(image['DATASEC' + CTI['amp']])
        image.mask[sec] |= BADPIX_BADAMP
        logger.info(' CTI: mask applied to image')
    return image
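
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical helper showing the intended calling sequence for the CTI
# utilities in this module: check_cti() measures the statistics and sets
# ctiDict['isCTI'], and mask_cti() masks the amplifier only if the check fired.
# The amp choice and the `img` argument (a DESImage-like object with data/mask
# planes and the usual DATASEC/EXPNUM keywords) are assumptions for the sketch.
def _example_cti_flow(img, amp='A'):
    """Minimal usage sketch for check_cti/mask_cti; not part of the pipeline."""
    cti = {'amp': amp}
    ctiDict = check_cti(img, cti, verbose=1)  # measure; sets ctiDict['isCTI']
    return mask_cti(img, cti, ctiDict)        # mask the amp only if flagged
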
def __call__(cls, image):
    """Convert pixel values from ADU to electrons, including the weight or
    variance image and critical keywords.

    :Parameters:
        - `image`: the DESImage whose pixel values are to be converted

    Applies the correction "in place"
    """
    logger.info('Gain Correcting Image')

    saturate = 0.
    gains = []
    for amp in decaminfo.amps:
        sec = section2slice(image['DATASEC' + amp])
        gain = image['GAIN' + amp]
        gains.append(gain)
        image.data[sec] *= gain

        # Adjust the weight or variance image if present:
        if image.weight is not None:
            image.weight[sec] *= 1. / (gain * gain)
        if image.variance is not None:
            image.variance[sec] *= gain * gain

        # Adjust keywords
        image['GAIN' + amp] = image['GAIN' + amp] / gain
        image['SATURAT' + amp] = image['SATURAT' + amp] * gain
        saturate = max(saturate, image['SATURAT' + amp])
        # Scale the SKYVAR if it's already here
        kw = 'SKYVAR' + amp
        if kw in image.header.keys():
            image[kw] = image[kw] * gain * gain
        # The FLATMED will keep track of rescalings *after* gain:
        image['FLATMED' + amp] = 1.

    # The SATURATE keyword is assigned the maximum of the two amps' values.
    image['SATURATE'] = saturate

    # Some other keywords that we will adjust crudely with the mean gain
    # if they are present:
    gain = np.mean(gains)
    for kw in ('SKYBRITE', 'SKYSIGMA'):
        if kw in image.header.keys():
            image[kw] = image[kw] * gain

    # One other keyword to adjust:
    image['BUNIT'] = 'electrons'

    logger.debug('Finished applying Gain Correction')
    ret_code = 0
    return ret_code
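
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical numeric check of the unit bookkeeping in the gain correction
# above: multiplying the data by GAIN converts ADU to electrons, so a variance
# plane scales by GAIN**2 and a weight (inverse-variance) plane by 1/GAIN**2,
# leaving the signal-to-noise ratio unchanged. All values are invented.
def _example_gain_units():
    """Verify that the ADU -> electron rescaling preserves S/N."""
    gain = 4.5                           # e-/ADU, arbitrary example value
    sky_adu = 200.0                      # sky level in ADU
    var_adu = sky_adu / gain             # Poisson variance in ADU^2
    sky_el = sky_adu * gain              # same pixel in electrons
    var_el = var_adu * gain**2           # variance transforms with the square
    snr_adu = sky_adu / np.sqrt(var_adu)
    snr_el = sky_el / np.sqrt(var_el)
    assert np.isclose(snr_adu, snr_el)   # S/N is invariant under the rescaling
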
def __call__(cls, image, dome):
    """Add a weight plane

    :Parameters:
        - `image`: the DESImage for which a weight plane is to be added

    Applies "in place"
    """
    logger.info('Adding Weight Image')

    if image.weight is None:
        image.init_weight()

        # Check that dome and data are from the same CCD
        try:
            items_must_match(image, dome, 'CCDNUM')
        except:
            return 1

        # Transform the sky image into a variance image
        data = image.data
        var = np.array(data, dtype=weight_dtype)
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            invgain = (image['FLATMED' + amp] / image['GAIN' + amp]) / dome.data[sec]
            var[sec] += image['RDNOISE' + amp]**2 * invgain
            var[sec] *= invgain
        # Add noise from the dome flat shot noise, if present
        if dome.weight is not None:
            var += data * data / (dome.weight * dome.data * dome.data)
        elif dome.variance is not None:
            var += data * data * dome.variance / (dome.data * dome.data)

        image.weight = 1.0 / var
        logger.info('Finished building a weight plane')
    else:
        logger.info('Weight plane already present... skipping.')

    ret_code = 0
    return ret_code
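
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical single-pixel version of the variance built in the loop above.
# With invgain = FLATMED / (GAIN * dome), the loop computes, per pixel,
#     var = (data + RDNOISE**2 * invgain) * invgain
# i.e. sky shot noise plus read noise, both expressed in the flattened-ADU
# units of the image. All numeric values below are arbitrary examples.
def _example_weight_variance():
    """Scalar walk-through of the per-amp variance recipe above."""
    data = 1000.0      # flattened sky level (ADU)
    flatmed = 1.0      # median level of the dome flat
    gaincorr = 4.0     # effective gain (e-/ADU) after flat rescaling
    dome = 0.95        # local dome flat value
    rdnoise = 7.0      # read noise in electrons
    invgain = (flatmed / gaincorr) / dome
    var = (data + rdnoise**2 * invgain) * invgain
    weight = 1.0 / var
    assert weight > 0
    return var
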
def check_cti(image, CTI, verbose=0):
    """Function to check for the presence of CTI"""
    #
    # Initialize ctiDict
    #
    ctiDict = {'isCTI': False}
    ctiDict['expnum'] = image['EXPNUM']

    # Also create the BAND and NITE keywords if they are not present
    try:
        image['BAND']
    except:
        image['BAND'] = decaminfo.get_band(image['FILTER'])
    try:
        image['NITE']
    except:
        image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])

    band = image['BAND'].strip()
    sec = section2slice(image['DATASEC' + CTI['amp']])
    #
    # This could become useful if it is necessary to start examining the opposite
    # amplifier in conjunction with the amplifier that is having a problem
    #
    # if CTI['amp'] == "A":
    #     osec = section2slice(image['DATASEC' + 'B'])
    # else:
    #     osec = section2slice(image['DATASEC' + 'A'])

    maxiter = 10
    converge_num = 0.0001
    clipsig = 3.0

    clip_avg, clip_med, clip_std = lb.medclip(image.data[sec], clipsig, maxiter,
                                              converge_num, verbose=0)
    logger.info(' CTI: Global(clipped): median = {:.3f}, stddev = {:.3f} '.format(clip_med, clip_std))
    ctiDict['cmed'] = float(clip_med)
    ctiDict['cstd'] = float(clip_std)
    clow = clip_med - (3.0 * clip_std)
    ctiDict['clow'] = float(clow)

    # oclip_avg, oclip_med, oclip_std = medclip(image.data[osec], clipsig, maxiter, converge_num, verbose)
    # print(" Global(oclipped): median = {:.3f}, stddev = {:.3f} ".format(oclip_med, oclip_std))
    # oclow = oclip_med - (3.0 * oclip_std)

    #
    # Obtain the row-by-row median to look for horizontal striping (also needed
    # to check/reject edgebleeds)
    #
    row_med = np.median(image.data[sec], axis=1)
    wsm = np.where(row_med < clow)
    nrow_low = row_med[wsm].size
    #
    # Hacky attempt to check for edge-bleed
    #
    iedge = [4, 4091]
    while row_med[iedge[0]] < clow:
        iedge[0] = iedge[0] + 1
    while row_med[iedge[1]] < clow:
        iedge[1] = iedge[1] - 1
    if iedge[0] == 4:
        iedge[0] = 0
    if iedge[1] == 4091:
        iedge[1] = 4095
    nrow_edge = 4096 - (iedge[1] - iedge[0] + 1)
    logger.info(' CTI: Number of low rows: {:d} (nrow_edge={:d}) '.format(nrow_low, nrow_edge))

    #
    # Blank out pixels that are below the 3-sigma level with respect to the median.
    # This removes power from vertical stripes.
    #
    wsm = np.where(image.data[sec] < clow)
    npix_low = image.data[sec][wsm].size
    logger.info(' CTI: Number of low pixels: {:d} '.format(npix_low))
    u = image.data[sec] - clip_med
    u[wsm] = 0.0
    #
    # A harder cut is currently not needed. If used, this would get rid of all
    # pixels below the median level (effectively reducing the amount that noise
    # suppresses the contrast of the auto-correlation signal from CTI).
    #
    # wsm = np.where(u < 0.)
    # npix_zero = u[wsm].size
    # logger.info(' CTI: Number of sub-zero pixels: {:d} '.format(npix_zero))
    # u[wsm] = 0.0

    #
    # Calculate a set of auto-correlations by sampling lags in the x-direction
    # and then two diagonal sets at PA=+/-45 degrees.
    # Note: y-direction lags would be susceptible to both bad columns and bleeds.
    # These are normalized by the auto-correlation with lag 0 (defined as 'a' below).
    # Take a maximum lag that will be calculated and use that to trim the image.
    # Note: this both gets rid of most edge-effects automatically and removes the
    # need to calculate an effective normalization for higher lags.
    #
    maxlag = 100
    lagList = [0, 1, 3, 5, 7, 11, 15, 19, 23, 31, 37, 45]
    a = np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag:-maxlag])
    # b = np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag:-maxlag, maxlag:-maxlag])
    x = [1.0]
    d1 = [1.0]
    d2 = [1.0]
    # vx = [1.0]
    # vd1 = [1.0]
    # vd2 = [1.0]
    #
    # More lags than those sampled are needed because the diagonal (PA=+/-45)
    # measures will need to be interpolated for comparison to lags in the
    # x-direction.
    #
    for lag in lagList:
        if lag != 0:
            x.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] *
                            u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a)
            d1.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] *
                             u[maxlag - lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a)
            d2.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] *
                             u[maxlag - lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / a)
            # vx.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag:-maxlag, maxlag-lag:-maxlag-lag]) / b)
            # vd1.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag-lag:-maxlag-lag, maxlag-lag:-maxlag-lag]) / b)
            # vd2.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag-lag:-maxlag-lag, maxlag+lag:-maxlag+lag]) / b)

    data = {'lag': np.array(lagList),
            'x': np.array(x),
            'd1': np.array(d1),
            'd2': np.array(d2)
            # 'vx': np.array(vx),
            # 'vd1': np.array(vd1),
            # 'vd2': np.array(vd2)
           }

    r2 = np.sqrt(2.0)
    l1 = data['lag']
    l2 = data['lag'] * r2
    x1 = data['x']
    d1i = np.interp(data['lag'], l2, data['d1'])
    d2i = np.interp(data['lag'], l2, data['d2'])
    rd1 = data['x'] / d1i
    rd2 = data['x'] / d2i
    # vx1 = data['vx']
    # vd1i = np.interp(data['lag'], l2, data['vd1'])
    # vd2i = np.interp(data['lag'], l2, data['vd2'])
    # vrd1 = data['vx'] / vd1i
    # vrd2 = data['vx'] / vd2i
    ## vdx = data['x'] / data['vx']
    # vdx = (rd1 + rd2) / (vrd1 + vrd2)

    logger.info(' CTI: lags {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(l1[3], l1[4], l1[6], l1[8], l1[10]))
    logger.info(' CTI: lx   {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(x1[3], x1[4], x1[6], x1[8], x1[10]))
    logger.info(' CTI: d1i  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d1i[3], d1i[4], d1i[6], d1i[8], d1i[10]))
    logger.info(' CTI: d2i  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d2i[3], d2i[4], d2i[6], d2i[8], d2i[10]))
    logger.info(' CTI: ld1  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd1[3], rd1[4], rd1[6], rd1[8], rd1[10]))
    logger.info(' CTI: ld2  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd2[3], rd2[4], rd2[6], rd2[8], rd2[10]))
    # logger.info(' CTI: lvx  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vx1[3], vx1[4], vx1[6], vx1[8], vx1[10]))
    # logger.info(' CTI:vd1i  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd1i[3], vd1i[4], vd1i[6], vd1i[8], vd1i[10]))
    # logger.info(' CTI:vd2i  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd2i[3], vd2i[4], vd2i[6], vd2i[8], vd2i[10]))
    # logger.info(' CTI:vld1  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd1[3], vrd1[4], vrd1[6], vrd1[8], vrd1[10]))
    # logger.info(' CTI:vld2  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd2[3], vrd2[4], vrd2[6], vrd2[8], vrd2[10]))
    # logger.info(' CTI:vdx0  {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vdx[3], vdx[4], vdx[6], vdx[8], vdx[10]))

    #
    # Set band-dependent thresholds...
    # Note the criteria used are based on an empirical study of the one example
    # we currently have (CCD=41, Y6).
    #
    nrow_lim = 5
    if band != "Y":
        cclim = 0.9
    else:
        cclim = 1.15
    #
    # Now check and set the flag based on empirical criteria.
    # First are the horizontal streaks that can appear...
    # Second is the comparison of the auto-correlation in the x-direction with
    # the average over the diagonal directions.
    #
    flag_cti = False
    if nrow_low - nrow_edge >= nrow_lim:
        flag_cti = True
    avg_rd = (rd1 + rd2) / 2.0
    if avg_rd[3] > cclim and avg_rd[4] > cclim and avg_rd[6] > cclim:
        flag_cti = True

    if flag_cti:
        ctiDict['isCTI'] = True

    return ctiDict
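
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical demonstration of why the diagonal auto-correlations above are
# interpolated: a diagonal step of `lag` pixels corresponds to a physical
# separation of lag * sqrt(2), so the diagonal curve is resampled onto the
# x-direction lag grid before forming the ratios rd1 = x / d1i and
# rd2 = x / d2i. The smoothly declining correlation values are synthetic.
def _example_lag_interpolation():
    """Resample a synthetic diagonal correlation curve onto the x-lag grid."""
    lags = np.array([0, 1, 3, 5, 7, 11])
    x = np.exp(-lags / 20.0)                      # synthetic x-direction correlation
    d = np.exp(-lags * np.sqrt(2.0) / 20.0)       # same falloff along the diagonal
    di = np.interp(lags, lags * np.sqrt(2.0), d)  # resample onto the x grid
    rd = x / di
    # With an isotropic signal the ratio is ~1 (up to linear-interpolation
    # error); CTI trailing in the x-direction drives it above the threshold.
    assert np.allclose(rd, 1.0, atol=0.01)
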
def __call__(cls, image, fit_filename, pc_filename, weight, dome, skymodel_filename):
    """
    Subtract sky from image using a previous principal-components fit.
    Optionally build a weight image from the fitted sky or from all counts, in
    which case the dome flat is needed and proper gain values are expected in
    the image header.

    :Parameters:
        - `image`: DESImage that has already been flattened with the dome and fit
        - `fit_filename`: filename with the coefficients from minisky fitting.
          Sky subtraction is skipped if this is None.
        - `pc_filename`: filename for the stored full-res sky principal components
        - `weight`: 'none' to skip weights, 'sky' to calculate the weight at sky
          level, 'all' to use all counts
        - `dome`: DESImage for the dome flat, needed if weight is not 'none'.
        - `skymodel_filename`: optional output filename for 'sky'
    """
    if weight == 'sky' and fit_filename is None:
        raise SkyError('Cannot make sky-only weight map without doing sky subtraction')

    if fit_filename is not None:
        logger.info('Subtracting sky')
        mini = skyinfo.MiniDecam.load(fit_filename)
        templates = skyinfo.SkyPC.load(pc_filename)
        if templates.detpos != image['DETPOS']:
            # Quit if we don't have the right CCD to subtract
            logger.error('Image DETPOS {:s} does not match sky template {:s}'.format(
                templates.detpos, image['DETPOS']))
            return 1
        try:
            image['BAND']
        except:
            image['BAND'] = decaminfo.get_band(image['FILTER'])
        try:
            items_must_match(image, mini.header, 'BAND', 'EXPNUM')
            items_must_match(image, templates.header, 'BAND')
            # ??? Could check that template and image use the same dome flat
        except:
            return 1
        sky = templates.sky(mini.coeffs)
        image.data -= sky
        image.write_key('SKYSBFIL', path.basename(pc_filename),
                        comment='Sky subtraction template file')
        for i, c in enumerate(mini.coeffs):
            image.write_key('SKYPC{:>02d}'.format(i), c,
                            comment='Sky template coefficient')
        logger.info('Finished sky subtraction')
        #
        # Optionally write the sky model that was subtracted from the image.
        #
        if skymodel_filename is not None:
            # Create an HDU for the output skymodel, add some header info,
            # and save the output to file
            logger.info('Optional output of skymodel requested')
            skymodel_image = DESDataImage(sky)
            skymodel_image.write_key('SKYSBFIL', path.basename(pc_filename),
                                     comment='Sky subtraction template file')
            for i, c in enumerate(mini.coeffs):
                skymodel_image.write_key('SKYPC{:>02d}'.format(i), c,
                                         comment='Sky template coefficient')
            skymodel_image.write_key('BAND', image['BAND'], comment='Band')
            skymodel_image.write_key('EXPNUM', image['EXPNUM'], comment='Exposure Number')
            skymodel_image.write_key('CCDNUM', image['CCDNUM'], comment='CCD Number')
            skymodel_image.write_key('NITE', image['NITE'], comment='Night')
            # skymodel_image.copy_header_info(image, cls.propagate, require=False)
            # ?? catch exception from write error below?
            skymodel_image.save(skymodel_filename)
    else:
        sky = None

    if weight == 'none':
        do_weight = False
        sky_weight = False
    elif weight == 'sky':
        do_weight = True
        sky_weight = True
    elif weight == 'all':
        do_weight = True
        sky_weight = False
    else:
        raise SkyError('Invalid weight value: ' + weight)

    if do_weight:
        if dome is None:
            raise SkyError('sky_subtract needs dome flat when making weights')

        if sky_weight:
            logger.info('Constructing weight image from sky image')
            data = sky
        else:
            logger.info('Constructing weight image from all counts')
            if sky is None:
                # If we did not subtract a sky, the image data gives total counts
                data = image.data
            else:
                # Add the sky back in to get total counts
                data = image.data + sky

        if image.weight is not None or image.variance is not None:
            image.weight = None
            image.variance = None
            logger.warning('Overwriting existing weight image')

        """
        We assume in constructing the weight (= inverse variance) image that
        the input image here has already been divided by the dome flat, and
        that its GAIN[AB] keywords are correct for a pixel that has been
        divided by the FLATMED[AB] of the flat image. So the number of
        *electrons* that were read in a pixel whose current value = sky is

            e = sky * (dome / FLATMED) * GAIN

        The variance has three parts: read noise, sky Poisson noise, and
        multiplicative errors from noise in the flat field. The read noise
        variance, in electrons, is

            Var = RDNOISE^2

        ...and the shot noise from the sky was, in electrons,

            Var = sky * (dome / FLATMED) * GAIN

        This means the total variance in the image, in its present form, is

            Var = (RDNOISE * FLATMED / dome / GAIN)^2 + (FLATMED / GAIN) * sky / dome

        We can also add the uncertainty propagated from shot noise in the dome
        flat, if the dome image has a weight or variance. In that case we
        would add

            Var += var(dome) * sky^2 / dome^2

        (remembering that sky has already been divided by the dome). If
        sky_weight = False, we can substitute the image data for sky in the
        above calculations.
        """

        # Transform the sky image into a variance image
        var = np.array(data, dtype=weight_dtype)
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            invgain = (image['FLATMED' + amp] / image['GAIN' + amp]) / dome.data[sec]
            var[sec] += image['RDNOISE' + amp]**2 * invgain
            var[sec] *= invgain
        # Add noise from the dome flat shot noise, if present
        if dome.weight is not None:
            var += data * data / (dome.weight * dome.data * dome.data)
        elif dome.variance is not None:
            var += data * data * dome.variance / (dome.data * dome.data)

        image.variance = var

        # Now compute statistics desired for the output image header.
        # First, the median variance at sky level on the two amps, SKYVAR[AB]
        meds = []
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            v = np.median(var[sec][::4, ::4])
            image.write_key('SKYVAR' + amp, v,
                            comment='Median noise variance at sky level, amp ' + amp)
            meds.append(v)
        # SKYSIGMA is the overall average noise level
        image.write_key('SKYSIGMA', np.sqrt(np.mean(meds)),
                        comment='RMS noise at sky level')
        # SKYBRITE is a measure of sky brightness. Use the sky image if we
        # have it, else use the data.
        if sky is None:
            skybrite = np.median(data[::4, ::4])
        else:
            skybrite = np.median(sky[::2, ::2])
        image.write_key('SKYBRITE', skybrite, comment='Median sky brightness')

        logger.debug('Finished weight construction')

        # Run null_mask or resaturate if requested on the command line
        if cls.do_step('null_mask') or cls.do_step('resaturate'):
            logger.info("Running null_weights")
            # We need to fix the step_name if we want to call 'step_run'
            null_weights.__class__.step_name = config_section
            #null_weights.__class__.step_name = cls.config_section
            null_weights.step_run(image, cls.config)

    ret_code = 0
    return ret_code
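
# --- Illustrative sketch (not the SkyPC API) ----------------------------------
# A hypothetical stand-in for what templates.sky(mini.coeffs) computes above:
# a principal-components sky model is a linear combination of template images.
# The array layout assumed here (`pcs` of shape (ncomp, ny, nx), `coeffs` of
# length ncomp) is an assumption for illustration, not the stored file format.
def _example_pc_sky(coeffs, pcs):
    """Return sum_i coeffs[i] * pcs[i] as a (ny, nx) sky image."""
    return np.tensordot(np.asarray(coeffs), np.asarray(pcs), axes=1)
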
def _doit(cls, image, flat_im):
    """Apply a flat field correction to an image - used for both dome
    and star flats.

    :Parameters:
        - `image`: the DESImage to apply the flat correction to
        - `flat_im`: the flat correction image to apply

    Applies the correction "in place"
    """
    logger.info('Applying Flat')

    # Check that flat and data are from the same CCD and filter
    try:
        image['BAND']
    except:
        # Give the image a BAND from its FILTER if it's not there
        image['BAND'] = decaminfo.get_band(image['FILTER'])
    try:
        items_must_match(image, flat_im, 'CCDNUM', 'BAND')
    except:
        return 1

    # Apply the flat to the data
    image.data /= flat_im.data

    # Update the variance or weight image if it exists
    if image.weight is not None:
        image.weight *= flat_im.data * flat_im.data
    if image.variance is not None:
        image.variance /= flat_im.data * flat_im.data

    # If a mask image exists, mark as BADPIX_BPM any pixels that have a
    # non-positive flat and are not already flagged.
    if image.mask is not None:
        # Find flat-field pixels that are invalid but not already bad for
        # one of these reasons:
        badmask = maskbits.BADPIX_BPM +\
                  maskbits.BADPIX_BADAMP +\
                  maskbits.BADPIX_EDGE
        badflat = np.logical_and(flat_im.data <= 0.,
                                 (image.mask & badmask) == 0)
        image.mask[badflat] |= maskbits.BADPIX_BPM

    # If a weight or variance image already exists, add to it any additional
    # variance from the flat:
    if image.weight is not None or image.variance is not None:
        if flat_im.weight is not None:
            var = image.get_variance()
            f2 = flat_im.data * flat_im.data
            var *= f2
            var += image.data * image.data / (flat_im.weight * f2)
        elif flat_im.variance is not None:
            var = image.get_variance()
            f2 = flat_im.data * flat_im.data
            var *= f2
            var += image.data * image.data * flat_im.variance / f2

    # Update header keywords for the rescaling
    saturate = 0.
    scales = []
    for amp in decaminfo.amps:
        # Acquire the typical scaling factor for each amp from the flat
        scalekw = 'FLATMED' + amp
        if scalekw in flat_im.header.keys():
            # Already stored in the flat's header:
            scale = flat_im[scalekw]
        else:
            # Figure it out ourselves from the median of a subsample:
            # sec = DESImage.section2slice(image['DATASEC'+amp])
            sec = section2slice(image['DATASEC' + amp])
            scale = np.median(flat_im.data[sec][::4, ::4])
        scales.append(scale)
        if scalekw in image.header.keys():
            # Add the current scaling to any previous ones
            image[scalekw] = image[scalekw] * scale
        else:
            image[scalekw] = scale
        image['GAIN' + amp] = image['GAIN' + amp] * scale
        image['SATURAT' + amp] = image['SATURAT' + amp] / scale
        # Scale the SKYVAR if it's already here
        kw = 'SKYVAR' + amp
        if kw in image.header.keys():
            image[kw] = image[kw] / (scale * scale)
        saturate = max(saturate, image['SATURAT' + amp])
    # The SATURATE keyword is assigned the maximum of the amps' values.
    image['SATURATE'] = saturate

    # Some other keywords that we will adjust crudely with the mean rescaling
    # if they are present:
    scale = np.mean(scales)
    for kw in ('SKYBRITE', 'SKYSIGMA'):
        if kw in image.header.keys():
            image[kw] = image[kw] / scale

    logger.debug('Finished applying Flat')
    ret_code = 0
    return ret_code
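
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical numeric check of the header bookkeeping in the flat correction
# above: dividing the data by a flat whose typical level is `scale` lowers the
# counts by `scale`, so the effective gain (e-/ADU) rises by `scale` and the
# saturation level drops by `scale`. All values below are invented examples.
def _example_flat_rescaling():
    """Show that the flat rescaling preserves the saturation level in electrons."""
    gain, saturate, skyvar = 4.0, 42000.0, 250.0
    scale = 1.2                        # FLATMED: typical level of the dome flat
    gain_out = gain * scale            # more electrons per post-flat ADU
    saturate_out = saturate / scale    # saturation moves down with the counts
    skyvar_out = skyvar / scale**2     # variances scale with the square
    # The saturation level in electrons (gain * saturate) is unchanged:
    assert np.isclose(gain_out * saturate_out, gain * saturate)
    return gain_out, saturate_out, skyvar_out
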
def __call__(cls, image, fname_lincor):
    """Apply a linearity correction

    :Parameters:
        - `image`: the DESImage to which the linearity correction is applied
        - `fname_lincor`: the linearity correction FITS table (contains look-up tables)

    Applies the correction "in place"
    """
    #
    # Discover the HDU in the linearity correction FITS table that contains
    # data for this specific CCD
    #
    fits_inventory = DESFITSInventory(fname_lincor)
    lincor_hdu = fits_inventory.ccd_hdus(image['CCDNUM'])
    if len(lincor_hdu) != 1:
        if not lincor_hdu:
            logger.error('Unable to locate HDU in %s containing linearity correction for CCDNUM %d. Aborting!',
                         fname_lincor, image['CCDNUM'])
        else:
            logger.error('Found multiple HDUs in %s containing linearity correction for CCDNUM %d. Aborting!',
                         fname_lincor, image['CCDNUM'])
        raise Exception()

    logger.info('Reading Linearity Correction from %s', fname_lincor)
    cat_fits = fitsio.FITS(fname_lincor, 'r')
    cat_hdu = lincor_hdu[0]
    cols_retrieve = ["ADU", "ADU_LINEAR_A", "ADU_LINEAR_B"]
    CAT = cat_fits[cat_hdu].read(columns=cols_retrieve)
    #
    # If columns do not get put into CAT in a predefined order then these
    # utilities may be needed. RAG has them and can implement... left this way
    # for now since it currently duplicates imcorrect exactly.
    #
    # CATcol = cat_fits[cat_hdu].get_colnames()
    # cdict = MkCatDict(CATcol, cols_retrieve)

    #
    # Define the correction being made.
    #
    nonlinear = []
    linearA = []
    linearB = []
    for row in CAT:
        nonlinear.append(row[0])
        linearA.append(row[1])
        linearB.append(row[2])
    nonlinear = np.array(nonlinear)
    linearA = np.array(linearA)
    linearB = np.array(linearB)
    interpA = interpolate.interp1d(nonlinear, linearA, kind='linear', copy=True)
    interpB = interpolate.interp1d(nonlinear, linearB, kind='linear', copy=True)
    logger.info('Applying Linearity Correction')

    #
    # Slice over the datasecs for each amplifier and apply the correction.
    #
    seca = section2slice(image['DATASECA'])
    secb = section2slice(image['DATASECB'])

    # Only fix pixels that are in the range of the nonlinearity table
    in_range = np.logical_and(image.data[seca] >= np.min(nonlinear),
                              image.data[seca] <= np.max(nonlinear))
    image.data[seca][in_range] = interpA(image.data[seca][in_range])
    in_range = np.logical_and(image.data[secb] >= np.min(nonlinear),
                              image.data[secb] <= np.max(nonlinear))
    image.data[secb][in_range] = interpB(image.data[secb][in_range])

    image.write_key('LINCFIL', path.basename(fname_lincor),
                    comment='Nonlinearity correction file')

    ret_code = 0
    return ret_code
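
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical miniature of the look-up-table correction above: a measured
# (nonlinear ADU -> linearized ADU) table is turned into an interpolator and
# applied only to pixels inside the table's range. The tiny synthetic table is
# an invented example, not a real DECam linearity curve.
def _example_lut_correction():
    """Apply a toy linearity LUT to a few pixels, leaving out-of-range ones alone."""
    adu = np.array([0., 1000., 2000., 4000., 8000.])      # measured values
    adu_lin = np.array([0., 1010., 2035., 4090., 8250.])  # linearized values
    interp = interpolate.interp1d(adu, adu_lin, kind='linear', copy=True)
    pix = np.array([500., 3000., 9000.])                  # last one is out of range
    in_range = np.logical_and(pix >= adu.min(), pix <= adu.max())
    pix[in_range] = interp(pix[in_range])                 # out-of-range pixel untouched
    return pix
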
def __call__(cls, image, bffile, bfmask):
    """
    Apply the brighter-fatter correction to an image, and set the
    BADPIX_SUSPECT bit in the mask image for pixels adjacent to those with
    unknown collected charge.

    :Parameters:
        - `image`: the DESImage to operate upon. Must have mask plane present.
        - `bffile`: name of the FITS file holding the brighter-fatter coefficients
        - `bfmask`: which bits in the mask will trigger a pixel being ignored -
          should denote those pixels having an unknown amount of charge during
          integration.
    """
    logger.info('Start brighter-fatter correction')
    if image.mask is None:
        raise BFError("Missing mask image for bf_correct")

    detpos = image['DETPOS'].strip()
    logger.info('reading BF corrections from %s' % bffile)
    bf = BFKernel(bffile, detpos)

    ignore = np.logical_or(np.isinf(image.data), np.isnan(image.data))
    ignore = np.logical_or(ignore, (image.mask & bfmask) != 0)

    # Get a median sky level and replace bad pixels with it when deriving the
    # kernel. Also put the image into electron units, if not already.
    data = np.array(image.data)
    for amp in decaminfo.amps:
        gain = image['GAIN' + amp]
        sec = section2slice(image['DATASEC' + amp])
        if gain != 1:
            data[sec] *= gain
        sky = np.median(data[sec][::4, ::4])
        data[sec][ignore[sec]] = sky

    # Convolve data with the R kernel to get right-hand pixel shifts
    df = np.fft.rfft2(data)
    kernel = bf.kernelR(data.shape)
    shift = np.fft.irfft2(df * np.fft.rfft2(kernel))
    # Multiply by the border charge to get the amount of charge to move.
    charge = 0.5 * (data[:, :-1] + data[:, 1:]) * shift[:, :-1]
    # Do not shift charge into or out of bad pixels
    charge[ignore[:, :-1]] = 0.
    charge[ignore[:, 1:]] = 0.
    # Adjust data for this shift
    out = np.array(image.data)
    for amp in decaminfo.amps:
        # Redo the temporary gain correction:
        gain = image['GAIN' + amp]
        sec = section2slice(image['DATASEC' + amp])
        if gain != 1:
            out[sec] *= gain
    out[:, 1:] -= charge
    out[:, :-1] += charge

    # Now do the upper-edge pixel shifts ??? Add gain factor here & T?
    kernel = bf.kernelT(data.shape)
    shift = np.fft.irfft2(df * np.fft.rfft2(kernel))
    # Multiply by the border charge to get the amount of charge to move.
    charge = 0.5 * (data[:-1, :] + data[1:, :]) * shift[:-1, :]
    # Do not shift charge into or out of bad pixels
    charge[ignore[:-1, :]] = 0.
    charge[ignore[1:, :]] = 0.
    # Adjust data for this shift
    out[1:, :] -= charge
    out[:-1, :] += charge

    # Undo the gain correction if we made it originally:
    for amp in decaminfo.amps:
        gain = image['GAIN' + amp]
        sec = section2slice(image['DATASEC' + amp])
        if gain != 1.:
            out[sec] /= gain

    image.data = out

    # Set the SUSPECT flag for all pixels that were adjacent to ignored
    # pixels, as their b/f correction is off
    change_mask = np.zeros(image.mask.shape, dtype=bool)
    change_mask[:-1, :] |= ignore[1:, :]   # mask below
    change_mask[1:, :] |= ignore[:-1, :]   # mask above
    change_mask[:, :-1] |= ignore[:, 1:]   # mask to left
    change_mask[:, 1:] |= ignore[:, :-1]   # mask to right
    change_mask[ignore] = False            # Don't mask what's already bad
    image.mask[change_mask] |= BADPIX_SUSPECT

    image.write_key('BFCFIL', path.basename(bffile),
                    comment='Brighter/fatter correction file')

    ret_code = 0
    return ret_code
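
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical check of the charge-shifting bookkeeping above: subtracting
# `charge` from one side of each pixel border and adding it to the other moves
# charge without creating or destroying it, so the image total is preserved.
# The tiny array and shift amplitude are invented for the demonstration.
def _example_charge_conservation():
    """Show that the paired -=/+= border updates conserve total charge."""
    out = np.array([[10., 12., 11.],
                    [13., 15., 14.]])
    total = out.sum()
    charge = 0.5 * (out[:, :-1] + out[:, 1:]) * 0.001  # toy border shifts
    out[:, 1:] -= charge
    out[:, :-1] += charge
    assert np.isclose(out.sum(), total)  # charge is moved, not created
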
def __call__(cls, image, bpm_im, saturate, clear):
    """Create or update the mask plane of an image

    :Parameters:
        - `image`: the DESImage to operate upon. The mask plane is created if absent
        - `bpm_im`: the DESBPMImage with the bad pixel mask. Skips the BPM step if None
        - `saturate`: boolean flag indicating whether to set BADPIX_SATURATE flags
        - `clear`: if True, clear the pre-existing mask. If False, OR new bits with old.
    """
    if image.mask is None:
        image.init_mask()
    elif clear:
        image.mask.fill(0)

    ret_code = 0
    if bpm_im is None and saturate is False:
        logger.warning('Null operation requested in make_mask')
        return ret_code

    # Flag saturated pixels first, if requested
    if saturate:
        # Check for a header keyword indicating whether it's already been done
        kw = 'DESSAT'
        if kw in image.header.keys() and not clear:
            logger.warning('Skipping saturation check (' + kw + ' already set)')
        else:
            logger.info('Flagging saturated pixels')
            nsat = 0
            for amp in decaminfo.amps:
                sec = section2slice(image['DATASEC' + amp])
                sat = image['SATURAT' + amp]
                satpix = image.data[sec] >= sat
                image.mask[sec][satpix] |= BADPIX_SATURATE
                nsat += np.count_nonzero(satpix)

            image.write_key(kw, time.asctime(time.localtime()),
                            comment='Flag saturated pixels')
            image.write_key('NSATPIX', nsat,
                            comment='Number of saturated pixels')

            logger.debug('Finished flagging saturated pixels')

    # Now fill in the BPM
    if bpm_im is not None:
        # Check for a header keyword indicating whether it's already been done
        kw = 'DESBPM'
        if kw in image.header.keys() and not clear:
            logger.warning('Skipping BPM application (' + kw + ' already set)')
        else:
            logger.info('Applying BPM')
            try:
                items_must_match(image, bpm_im, 'CCDNUM')
            except:
                return 1

            # ==== Temporary kluge until we get the new BPMs ====
            # Replace CORR with BIAS_COL
            # bitmask = BPMDEF_CORR
            # mark = (bpm_im.mask & bitmask) != 0
            # bpm_im.mask[mark] |= BPMDEF_BIAS_COL
            # Clear correctable bits from the BPM if any are already set
            # bpm_im.mask -= (bpm_im.mask & BPMDEF_CORR)
            # ==== End kluge ====

            # Map the following BPM bits to BADPIX_BPM in the image mask
            bitmask = BPMDEF_FLAT_MIN | \
                      BPMDEF_FLAT_MAX | \
                      BPMDEF_FLAT_MASK | \
                      BPMDEF_BIAS_HOT | \
                      BPMDEF_BIAS_WARM | \
                      BPMDEF_BIAS_MASK | \
                      BPMDEF_BIAS_COL | \
                      BPMDEF_FUNKY_COL | \
                      BPMDEF_WACKY_PIX
            # ERICM: removed BPMDEF_CORR from, and added FUNKY_COL to, the list above
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_BPM

            # Copy BPM edge pixels to the image mask
            bitmask = BPMDEF_EDGE
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_EDGE

            # Copy bad amplifier bits to the image mask
            bitmask = BPMDEF_BADAMP
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_BADAMP

            # Copy SUSPECT BPM bits to the image mask
            bitmask = BPMDEF_SUSPECT
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_SUSPECT

            # Copy NEAREDGE BPM bits to the image mask
            bitmask = BPMDEF_NEAREDGE
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_NEAREDGE

            # Copy TAPEBUMP BPM bits to the image mask
            bitmask = BPMDEF_TAPEBUMP
            mark = (bpm_im.mask & bitmask) != 0
            image.mask[mark] |= BADPIX_TAPEBUMP

            # Mark correctable pixels.
            # Pixels flagged as BPMDEF_BIAS_COL and BPMDEF_FUNKY_COL may be
            # correctable. We flag them in the image as bad (BADPIX_BPM), but
            # if fix_columns is run the BADPIX_BPM flag will be cleared and the
            # BADPIX_FIX flag will be set.

            # For each column, find the number of pixels flagged as BIAS_HOT
            # and BIAS_COL
            N_BIAS_HOT = np.sum((bpm_im.mask & BPMDEF_BIAS_HOT) > 0, axis=0)
            N_BIAS_COL = np.sum((bpm_im.mask & BPMDEF_BIAS_COL) > 0, axis=0)
            maskwidth = bpm_im.mask.shape[1]

            # First do columns with N_BIAS_COL set for 1 or more pixels
            biascols = np.arange(maskwidth)[N_BIAS_COL > 0]
            for icol in biascols:
                # Clear the FUNKY_COL bit if set for all pixels in this column.
                # The reason for clearing the bit is that the FUNKY_COL
                # detection is sensitive to hot bias pixels and may flag those
                # columns by "mistake". First clear the BADPIX_BPM bit if it
                # was set because of a funky column.
                image.mask[:, icol][bpm_im.mask[:, icol] == BPMDEF_FUNKY_COL] &= ~BADPIX_BPM
                bpm_im.mask[:, icol] -= (bpm_im.mask[:, icol] & BPMDEF_FUNKY_COL)
                # Correctable columns have exactly 1 BIAS_HOT pixel
                if N_BIAS_HOT[icol] == 1:
                    # Correctable pixels have the BIAS_COL bit set
                    bpm_im.mask[:, icol][(bpm_im.mask[:, icol] & BPMDEF_BIAS_COL) > 0] |= BPMDEF_CORR
                    logger.info('Column ' + str(icol) + ' has 1 hot pixel and is correctable.')
                else:
                    logger.info('Column ' + str(icol) + ' has ' + str(N_BIAS_HOT[icol]) +
                                ' hot pixels and is NOT correctable.')

            # Now do columns with FUNKY_COL set. Note that the FUNKY_COL bits
            # have been cleared above for hot bias columns.
            N_FUNKY_COL = np.sum((bpm_im.mask & BPMDEF_FUNKY_COL) > 0, axis=0)
            funkycols = np.arange(maskwidth)[N_FUNKY_COL > 0]
            for icol in funkycols:
                # Correctable pixels have the FUNKY_COL bit set
                bpm_im.mask[:, icol][(bpm_im.mask[:, icol] & BPMDEF_FUNKY_COL) > 0] |= BPMDEF_CORR
                logger.info('Column ' + str(icol) + ' is funky and correctable.')

            image.write_key(kw, time.asctime(time.localtime()),
                            comment='Construct mask from BPM')
            if bpm_im.sourcefile is None:
                image.write_key('BPMFIL', 'UNKNOWN',
                                comment='BPM file used to build mask')
            else:
                image.write_key('BPMFIL', path.basename(bpm_im.sourcefile),
                                comment='BPM file used to build mask')

            logger.debug('Finished applying BPM')

    return ret_code
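
# --- Illustrative sketch (not pipeline code) ---------------------------------
# A hypothetical miniature of the BPM-to-mask translation above: several input
# bits are OR-ed into one test mask, and every pixel with any of them set
# receives a single output bit. The bit values here are arbitrary stand-ins
# for the BPMDEF_* / BADPIX_* constants used by the pipeline.
def _example_bit_mapping():
    """Map two toy input bits onto one toy output bit, as the BPM loop does."""
    BIT_A, BIT_B, OUT_BIT = 1, 2, 32          # hypothetical bit assignments
    bpm = np.array([0, BIT_A, BIT_B, BIT_A | BIT_B], dtype=np.int32)
    mask = np.zeros_like(bpm)
    mark = (bpm & (BIT_A | BIT_B)) != 0       # any of the mapped bits set?
    mask[mark] |= OUT_BIT
    assert list(mask) == [0, 32, 32, 32]
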