def __call__(cls, image, bias_im):
    """Apply a bias correction to an image

    :Parameters:
        - `image`: the DESImage to apply a bias correction to
        - `bias_im`: the bias correction image to apply

    Applies the correction "in place." Also creates BAND and NITE
    keywords if they are not present.
    """
    logger.info('Applying Bias')
    # Check that the bias and data are from the same CCD
    try:
        items_must_match(image, bias_im, 'CCDNUM')
    except Exception:
        return 1
    image.data -= bias_im.data
    # If the image carries a weight or variance plane, add the variance
    # of the bias to it
    if image.weight is not None or image.variance is not None:
        if bias_im.weight is not None:
            var = image.get_variance()
            var += 1. / bias_im.weight
        elif bias_im.variance is not None:
            var = image.get_variance()
            var += bias_im.variance
    logger.debug('Finished applying Bias')
    if bias_im.sourcefile is None:
        image.write_key('BIASFIL', 'UNKNOWN', comment='Bias correction file')
    else:
        image.write_key('BIASFIL', path.basename(bias_im.sourcefile),
                        comment='Bias correction file')
    # Also create the BAND and NITE keywords if they are not present
    try:
        image['BAND']
    except KeyError:
        image['BAND'] = decaminfo.get_band(image['FILTER'])
    try:
        image['NITE']
    except KeyError:
        image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])

    ret_code = 0
    return ret_code
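# Illustrative sketch (not an actual pipeline routine): the weight handling in
# the bias step above is ordinary error propagation. For independent frames,
# Var(image - bias) = Var(image) + Var(bias), and since a weight plane stores
# inverse variance, the bias enters the image's variance as 1/bias.weight.
# All names and values below are made up for illustration.
def _bias_variance_example():
    import numpy as np
    image_var = np.full((2, 2), 2.0)     # variance plane of the image
    bias_weight = np.full((2, 2), 4.0)   # inverse-variance (weight) plane of the bias
    image_var += 1.0 / bias_weight       # combined variance: 2.25 per pixel
    return image_var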
def check_cti(image, CTI, verbose=0):
    """Function to check for the presence of CTI"""
    #
    # Initialize ctiDict
    #
    ctiDict = {'isCTI': False}
    ctiDict['expnum'] = image['EXPNUM']

    # Also create the BAND and NITE keywords if they are not present
    try:
        image['BAND']
    except KeyError:
        image['BAND'] = decaminfo.get_band(image['FILTER'])
    try:
        image['NITE']
    except KeyError:
        image['NITE'] = decaminfo.get_nite(image['DATE-OBS'])

    band = image['BAND'].strip()
    sec = section2slice(image['DATASEC' + CTI['amp']])
    #
    # This could become useful if it is necessary to start examining the
    # opposite amplifier in conjunction with the amplifier that is having
    # a problem
    #
    # if CTI['amp'] == "A":
    #     osec = section2slice(image['DATASEC' + 'B'])
    # else:
    #     osec = section2slice(image['DATASEC' + 'A'])

    maxiter = 10
    converge_num = 0.0001
    clipsig = 3.0

    clip_avg, clip_med, clip_std = lb.medclip(image.data[sec], clipsig, maxiter, converge_num, verbose=0)
    logger.info(' CTI: Global(clipped): median = {:.3f}, stddev = {:.3f} '.format(clip_med, clip_std))
    ctiDict['cmed'] = float(clip_med)
    ctiDict['cstd'] = float(clip_std)
    clow = clip_med - (3.0 * clip_std)
    ctiDict['clow'] = float(clow)

    # oclip_avg, oclip_med, oclip_std = medclip(image.data[osec], clipsig, maxiter, converge_num, verbose)
    # print(" Global(oclipped): median = {:.3f}, stddev = {:.3f} ".format(oclip_med, oclip_std))
    # oclow = oclip_med - (3.0 * oclip_std)

    #
    # Obtain the row-by-row median to look for horizontal striping (also
    # needed to check/reject edgebleeds)
    #
    row_med = np.median(image.data[sec], axis=1)
    wsm = np.where(row_med < clow)
    nrow_low = row_med[wsm].size
    #
    # Hacky attempt to check for edge-bleed
    #
    iedge = [4, 4091]
    while row_med[iedge[0]] < clow:
        iedge[0] = iedge[0] + 1
    while row_med[iedge[1]] < clow:
        iedge[1] = iedge[1] - 1
    if iedge[0] == 4:
        iedge[0] = 0
    if iedge[1] == 4091:
        iedge[1] = 4095
    nrow_edge = 4096 - (iedge[1] - iedge[0] + 1)
    logger.info(' CTI: Number of low rows: {:d} (nrow_edge={:d}) '.format(nrow_low, nrow_edge))

    #
    # Blank out pixels that are below the 3-sigma level with respect to the median.
    # This removes power from vertical stripes.
    #
    wsm = np.where(image.data[sec] < clow)
    npix_low = image.data[sec][wsm].size
    logger.info(' CTI: Number of low pixels: {:d} '.format(npix_low))
    u = image.data[sec] - clip_med
    u[wsm] = 0.0

    #
    # Harder cut currently not needed. If used, this would get rid of all
    # pixels below the median level (effectively this reduces the amount that
    # noise suppresses contrast of the auto-correlation signal from CTI).
    #
    # wsm = np.where(u < 0.)
    # npix_zero = u[wsm].size
    # logger.info(' CTI: Number of sub-zero pixels: {:d} '.format(npix_zero))
    # u[wsm] = 0.0

    #
    # Calculate a set of auto-correlations by sampling lags in the x-direction
    # and then two diagonal sets at PA=+/-45 degrees.
    # Note: y-direction lags would be susceptible to both bad columns and bleeds.
    # These are normalized by the auto-correlation with lag 0 (defined as 'a' below).
    # Take a maximum lag that will be calculated and use that to trim the image.
    # Note: this both gets rid of most edge-effects automatically and removes
    # the need to calculate an effective normalization for higher lags.
    #
    maxlag = 100
    lagList = [0, 1, 3, 5, 7, 11, 15, 19, 23, 31, 37, 45]
    a = np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag:-maxlag])
    # b = np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag:-maxlag, maxlag:-maxlag])
    x = [1.0]
    d1 = [1.0]
    d2 = [1.0]
    # vx = [1.0]
    # vd1 = [1.0]
    # vd2 = [1.0]
    #
    # More lags than those sampled are needed because the diagonal (PA=+/-45)
    # measures will need to be interpolated for comparison to lags in the
    # x-direction.
    #
    for lag in lagList:
        if lag != 0:
            x.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a)
            d1.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag - lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a)
            d2.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag - lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / a)
            # vx.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / b)
            # vd1.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag - lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / b)
            # vd2.append(np.sum(v[maxlag:-maxlag, maxlag:-maxlag] * v[maxlag - lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / b)

    data = {'lag': np.array(lagList),
            'x': np.array(x),
            'd1': np.array(d1),
            'd2': np.array(d2)
            # 'vx': np.array(vx),
            # 'vd1': np.array(vd1),
            # 'vd2': np.array(vd2)
           }

    r2 = np.sqrt(2.0)
    l1 = data['lag']
    l2 = data['lag'] * r2
    x1 = data['x']
    d1i = np.interp(data['lag'], l2, data['d1'])
    d2i = np.interp(data['lag'], l2, data['d2'])
    rd1 = data['x'] / d1i
    rd2 = data['x'] / d2i
    # vx1 = data['vx']
    # vd1i = np.interp(data['lag'], l2, data['vd1'])
    # vd2i = np.interp(data['lag'], l2, data['vd2'])
    # vrd1 = data['vx'] / vd1i
    # vrd2 = data['vx'] / vd2i
    ## vdx = data['x'] / data['vx']
    # vdx = (rd1 + rd2) / (vrd1 + vrd2)

    logger.info(' CTI: lags {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(l1[3], l1[4], l1[6], l1[8], l1[10]))
    logger.info(' CTI: lx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(x1[3], x1[4], x1[6], x1[8], x1[10]))
    logger.info(' CTI: d1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d1i[3], d1i[4], d1i[6], d1i[8], d1i[10]))
    logger.info(' CTI: d2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d2i[3], d2i[4], d2i[6], d2i[8], d2i[10]))
    logger.info(' CTI: ld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd1[3], rd1[4], rd1[6], rd1[8], rd1[10]))
    logger.info(' CTI: ld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd2[3], rd2[4], rd2[6], rd2[8], rd2[10]))
    # logger.info(' CTI: lvx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vx1[3], vx1[4], vx1[6], vx1[8], vx1[10]))
    # logger.info(' CTI:vd1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd1i[3], vd1i[4], vd1i[6], vd1i[8], vd1i[10]))
    # logger.info(' CTI:vd2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd2i[3], vd2i[4], vd2i[6], vd2i[8], vd2i[10]))
    # logger.info(' CTI:vld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd1[3], vrd1[4], vrd1[6], vrd1[8], vrd1[10]))
    # logger.info(' CTI:vld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd2[3], vrd2[4], vrd2[6], vrd2[8], vrd2[10]))
    # logger.info(' CTI:vdx0 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vdx[3], vdx[4], vdx[6], vdx[8], vdx[10]))

    #
    # Set band-dependent thresholds...
    # Note the criteria used are based on an empirical study of the one
    # example we currently have (CCD=41, Y6).
    #
    nrow_lim = 5
    if band != "Y":
        cclim = 0.9
    else:
        cclim = 1.15
    #
    # Now check and set the flag based on empirical criteria.
    # First are the horizontal streaks that can appear...
    # Second is the comparison of the auto-correlation in the x-direction
    # to the average of the diagonal directions.
    #
    flag_cti = False
    if nrow_low - nrow_edge >= nrow_lim:
        flag_cti = True
    avg_rd = (rd1 + rd2) / 2.0
    if avg_rd[3] > cclim and avg_rd[4] > cclim and avg_rd[6] > cclim:
        flag_cti = True

    if flag_cti:
        ctiDict['isCTI'] = True

    return ctiDict
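# Illustrative sketch (not an actual pipeline routine): check_cti above flags
# CTI by comparing the normalized autocorrelations along x against the +/-45
# degree diagonals (interpolated to comparable lag lengths). Charge-transfer
# trails smear flux along x only, so the x-lag correlations decay more slowly
# than the diagonal ones and the ratios rd1, rd2 rise above unity. A minimal,
# self-contained demonstration on synthetic data; names and values are made up.
def _cti_autocorr_example():
    import numpy as np
    rng = np.random.default_rng(42)
    u = rng.normal(0.0, 1.0, (512, 512))
    u = u + 0.5 * np.roll(u, 1, axis=1)  # smear along x, mimicking a CTI trail
    maxlag, lag = 10, 1
    core = u[maxlag:-maxlag, maxlag:-maxlag]
    a = np.sum(core * core)  # lag-0 normalization
    # Correlation at an x lag vs. the diagonal at the same lag index
    x = np.sum(core * u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a
    d1 = np.sum(core * u[maxlag - lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a
    return x, d1  # expect x ~ 0.4 but d1 ~ 0 for x-smeared noise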
def __call__(cls, image, fit_filename, pc_filename,
             weight, dome, skymodel_filename):
    """
    Subtract sky from image using a previous principal-components fit.
    Optionally build a weight image from the fitted sky or from all counts,
    in which case the dome flat is needed and proper gain values are
    expected in the image header.

    :Parameters:
        - `image`: DESImage that has already been flattened with the dome and fit
        - `fit_filename`: filename with the coefficients from minisky fitting.
          Sky subtraction is skipped if this is None.
        - `pc_filename`: filename for the stored full-res sky principal components
        - `weight`: 'none' to skip weights, 'sky' to calculate weight at sky
          level, 'all' to use all counts
        - `dome`: DESImage for the dome flat, needed if weight is not 'none'
        - `skymodel_filename`: optional output filename for 'sky'
    """
    if weight == 'sky' and fit_filename is None:
        raise SkyError('Cannot make sky-only weight map without doing sky subtraction')

    if fit_filename is not None:
        logger.info('Subtracting sky')
        mini = skyinfo.MiniDecam.load(fit_filename)
        templates = skyinfo.SkyPC.load(pc_filename)
        if templates.detpos != image['DETPOS']:
            # Quit if we don't have the right CCD to subtract
            logger.error('Image DETPOS {:s} does not match sky template {:s}'.format(
                templates.detpos, image['DETPOS']))
            return 1
        try:
            image['BAND']
        except KeyError:
            image['BAND'] = decaminfo.get_band(image['FILTER'])
        try:
            items_must_match(image, mini.header, 'BAND', 'EXPNUM')
            items_must_match(image, templates.header, 'BAND')
            # ??? Could check that the template and image use the same dome flat
        except Exception:
            return 1
        sky = templates.sky(mini.coeffs)
        image.data -= sky
        image.write_key('SKYSBFIL', path.basename(pc_filename),
                        comment='Sky subtraction template file')
        for i, c in enumerate(mini.coeffs):
            image.write_key('SKYPC{:>02d}'.format(i), c,
                            comment='Sky template coefficient')
        logger.info('Finished sky subtraction')
        #
        # Optionally write the sky model that was subtracted from the image.
        #
        if skymodel_filename is not None:
            # Create an HDU for the output skymodel, add some header info,
            # and save the output to file
            logger.info('Optional output of skymodel requested')
            skymodel_image = DESDataImage(sky)
            skymodel_image.write_key('SKYSBFIL', path.basename(pc_filename),
                                     comment='Sky subtraction template file')
            for i, c in enumerate(mini.coeffs):
                skymodel_image.write_key('SKYPC{:>02d}'.format(i), c,
                                         comment='Sky template coefficient')
            skymodel_image.write_key('BAND', image['BAND'], comment='Band')
            skymodel_image.write_key('EXPNUM', image['EXPNUM'], comment='Exposure Number')
            skymodel_image.write_key('CCDNUM', image['CCDNUM'], comment='CCD Number')
            skymodel_image.write_key('NITE', image['NITE'], comment='Night')
            # skymodel_image.copy_header_info(image, cls.propagate, require=False)
            ## ?? catch exception from write error below?
            skymodel_image.save(skymodel_filename)
    else:
        sky = None

    if weight == 'none':
        do_weight = False
        sky_weight = False
    elif weight == 'sky':
        do_weight = True
        sky_weight = True
    elif weight == 'all':
        do_weight = True
        sky_weight = False
    else:
        raise SkyError('Invalid weight value: ' + weight)

    if do_weight:
        if dome is None:
            raise SkyError('sky_subtract needs dome flat when making weights')

        if sky_weight:
            logger.info('Constructing weight image from sky image')
            data = sky
        else:
            logger.info('Constructing weight image from all counts')
            if sky is None:
                # If we did not subtract a sky, the image data give total counts
                data = image.data
            else:
                # Add the sky back in to get total counts
                data = image.data + sky

        if image.weight is not None or image.variance is not None:
            image.weight = None
            image.variance = None
            logger.warning('Overwriting existing weight image')

        """
        We assume in constructing the weight (= inverse variance) image that
        the input image here has already been divided by the dome flat, and
        that its GAIN[AB] keywords are correct for a pixel that has been
        divided by the FLATMED[AB] of the flat image. So the number of
        *electrons* that were read in a pixel whose current value = sky is

            e = sky * (dome / FLATMED) * GAIN

        The variance has three parts: read noise, sky Poisson noise, and
        multiplicative errors from noise in the flat field. The read noise
        variance, in electrons, is

            Var = RDNOISE^2

        ...and the shot noise from the sky was, in electrons,

            Var = sky * (dome / FLATMED) * GAIN

        This means the total variance in the image, in its present form, is

            Var = (RDNOISE * FLATMED / dome / GAIN)^2 + (FLATMED / GAIN) * sky / dome

        We can also add the uncertainty propagated from shot noise in the dome
        flat, if the dome image has a weight or variance. In that case we
        would add

            Var += var(dome) * sky^2 / dome^2

        (remembering that sky has already been divided by the dome). If
        sky_weight = False, we can substitute the image data for sky in the
        above calculations.
        """

        # Transform the sky image into a variance image
        var = np.array(data, dtype=weight_dtype)
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            invgain = (image['FLATMED' + amp] / image['GAIN' + amp]) / dome.data[sec]
            var[sec] += image['RDNOISE' + amp]**2 * invgain
            var[sec] *= invgain
        # Add noise from the dome flat shot noise, if present
        if dome.weight is not None:
            var += data * data / (dome.weight * dome.data * dome.data)
        elif dome.variance is not None:
            var += data * data * dome.variance / (dome.data * dome.data)

        image.variance = var

        # Now compute the statistics desired for the output image header.
        # First, the median variance at sky level on the two amps, SKYVAR[AB]
        meds = []
        for amp in decaminfo.amps:
            sec = section2slice(image['DATASEC' + amp])
            v = np.median(var[sec][::4, ::4])
            image.write_key('SKYVAR' + amp, v,
                            comment='Median noise variance at sky level, amp ' + amp)
            meds.append(v)
        # SKYSIGMA is the overall average noise level
        image.write_key('SKYSIGMA', np.sqrt(np.mean(meds)),
                        comment='RMS noise at sky level')
        # SKYBRITE is a measure of sky brightness. Use the sky image if we've
        # got it, else use the data.
        if sky is None:
            skybrite = np.median(data[::4, ::4])
        else:
            skybrite = np.median(sky[::2, ::2])
        image.write_key('SKYBRITE', skybrite, comment='Median sky brightness')

        logger.debug('Finished weight construction')

        # Run null_mask or resaturate if requested on the command line
        if cls.do_step('null_mask') or cls.do_step('resaturate'):
            logger.info("Running null_weights")
            # We need to fix the step_name if we want to call 'step_run'
            null_weights.__class__.step_name = config_section
            #null_weights.__class__.step_name = cls.config_section
            null_weights.step_run(image, cls.config)

    ret_code = 0
    return ret_code
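# Illustrative sketch (not an actual pipeline routine): plugging sample
# numbers into the variance formula derived in the block comment above,
#     Var = (RDNOISE * FLATMED / dome / GAIN)^2 + (FLATMED / GAIN) * sky / dome
# which matches what the invgain loop computes. All values are made up.
def _sky_variance_example():
    rdnoise = 7.0   # read noise in electrons
    gain = 4.0      # e-/ADU for a FLATMED-normalized pixel
    flatmed = 1.0   # median level of the dome flat
    dome = 1.0      # dome-flat value at this pixel
    sky = 800.0     # sky level in ADU (already flat-fielded)
    invgain = (flatmed / gain) / dome
    var = (rdnoise * invgain)**2 + sky * invgain
    return var  # 3.0625 + 200.0 = 203.0625 ADU^2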
def __call__(cls, in_filename, out_filename, ccdnum,
             input_template=None, input_list=None, good_filename=None,
             reject_rms=None, mem_use=8., bitmask=skyinfo.DEFAULT_SKYMASK):
    """
    Create full-resolution sky templates based on a previous PCA.
    Does this pixel by pixel, via robust fitting of the data in the input
    full-res images to the PCA coefficients. The filenames of the full-res
    input images are determined from the EXPNUM _either_ by python formatting
    of the string given in -input_template _or_ by looking at the list of
    expnum, filename pairs in the file specified by -input_list.
    The output FITS image has an extension NGOOD giving the number of images
    used in the fit at each pixel.

    :Parameters:
        - `in_filename`: the file holding the PCA outputs on compressed sky
        - `out_filename`: filename for the output template
        - `ccdnum`: which CCD to produce templates for
        - `input_template`: string that can be formatted with the expnum to
          yield the filename of the DESImage holding the full-res data
        - `input_list`: name of a file containing expnum, filename pairs, one
          pair per line, separated by whitespace
        - `good_filename`: name of a FITS file in which to save the number of
          images contributing to each pixel's fit. No output if None.
        - `reject_rms`: exclude exposures with fractional RMS residual sky
          above this. If this is None, just use the exposures that PCA used.
        - `mem_use`: number of GB to target for memory usage (default: 8)
        - `bitmask`: applied to the MASK extension of images for initial
          bad-pixel exclusion
    """
    logger.info('Starting sky template construction')

    # Need exactly one of these two arguments:
    if not ((input_template is None) ^ (input_list is None)):
        logger.error('Need exactly one of input_template and input_list to be given')
        return 1

    # Acquire PCA information, including the table of info on input exposures
    pc = skyinfo.MiniskyPC.load(in_filename)
    pctab = skyinfo.MiniskyPC.get_exposures(in_filename)

    # Build a MiniDECam that has our choice of CCDs that we can use for indexing
    mini = pc.get_pc(0)
    # Quit if we are requesting a template for a CCD that was not compressed
    detpos = decaminfo.detpos_dict[ccdnum]
    try:
        mini.index_of(detpos, 1, 1)
    except skyinfo.SkyError:
        logger.error('Template requested for CCDNUM not included in PCA')
        return 1

    # Select the exposures we'll use
    if reject_rms is None:
        # If no RMS threshold is specified, use the same exposures
        # that were kept during PCA of the compressed skies
        use = np.array(pctab['USE'])
    else:
        # Choose our own threshold
        use = pctab['RMS'] < reject_rms

    # Get filenames for the full-res images from the list:
    if input_list is not None:
        filenames = {}
        flist = np.loadtxt(input_list, dtype=str)
        for expnum, filename in flist:
            filenames[int(expnum)] = filename
        del flist

        # Now warn if we are missing expnums and remove them from the usable
        # exposure list
        for i, val in enumerate(use):
            if val and int(pctab['EXPNUM'][i]) not in filenames.keys():
                use[i] = False
                logger.warning('No input filename given for expnum ' + str(pctab['EXPNUM'][i]))

    nimg = np.count_nonzero(use)

    expnums = []
    vv = []
    for i, val in enumerate(use):
        if val:
            vv.append(pctab['COEFFS'][i])
            expnums.append(pctab['EXPNUM'][i])
    V = np.vstack(vv)
    del vv

    # We'll re-normalize each exposure, and its coefficients, by V[0]
    norms = np.array(V[:, 0])
    V = V.T / norms  # V is now of shape (npc, nimg)
    # The linear solutions will require this:
    ainv = np.linalg.inv(np.dot(V, V.T))

    nexp = V.shape[1]
    npc = pc.U.shape[1]
    ySize = decaminfo.shape[0]
    xSize = decaminfo.shape[1]

    # Create the output array
    out = np.zeros((npc, ySize, xSize), dtype=np.float32)

    # And an array to hold the number of exposures used at each pixel:
    if good_filename is not None:
        ngood = np.zeros((ySize, xSize), dtype=np.int16)

    # Decide how many rows of blocks we'll read from files at a time
    bytes_per_row = 4 * xSize * pc.blocksize * nimg
    xBlocks = xSize // pc.blocksize
    yBlocks = min(int(np.floor(0.8 * mem_use * (2**30) / bytes_per_row)),
                  ySize // pc.blocksize)
    if yBlocks < 1:
        logger.warning('Proceeding even though mem_use is not enough to store 1 row of blocks')
        yBlocks = 1

    d = {'ccd': ccdnum}  # A dictionary used to assign names to files
    hdr = {}  # A dictionary of information to go into the output image header
    # A mask of zero is equivalent to no masking:
    if bitmask == 0:
        bitmask = None

    nonConvergentBlocks = 0  # Keep count of blocks where clipping does not converge
    # Collect input data in chunks of yBlocks rows of blocks, then process
    # one block at a time
    for yStart in range(0, ySize, yBlocks * pc.blocksize):
        # Acquire the pixel data into a 3d array
        yStop = min(ySize, yStart + yBlocks * pc.blocksize)
        logger.info('Working on rows {:d} -- {:d}'.format(yStart, yStop))
        data = np.zeros((nimg, yStop - yStart, xSize), dtype=np.float32)
        # Mask image:
        mask = np.zeros((nimg, yStop - yStart, xSize), dtype=bool)

        for i, expnum in enumerate(expnums):
            d['expnum'] = expnum
            if input_template is None:
                # Get the filename from the input list
                filename = filenames[expnum]
            else:
                # Get the filename from formatting the template
                filename = input_template.format(**d)
            logger.debug('Getting pixels from ' + filename)
            with fitsio.FITS(filename) as fits:
                data[i, :, :] = fits['SCI'][yStart:yStop, :xSize]
                if bitmask is None:
                    mask[i, :, :] = True
                else:
                    m = np.array(fits['MSK'][yStart:yStop, :xSize], dtype=np.int16)
                    mask[i, :, :] = (m & bitmask) == 0
                    del m
                if yStart == 0:
                    # First time through the images we will be collecting/checking
                    # header information from the contributing images
                    hdrin = fits['SCI'].read_header()
                    usehdr = {}
                    if 'BAND' in hdrin.keys():
                        usehdr['BAND'] = hdrin['BAND']
                    elif 'FILTER' in hdrin.keys():
                        usehdr['BAND'] = decaminfo.get_band(hdrin['FILTER'])
                    else:
                        logger.error('No BAND or FILTER in ' + filename)
                        return 1
                    if 'NITE' in hdrin.keys():
                        usehdr['NITE'] = hdrin['NITE']
                    elif 'DATE-OBS' in hdrin.keys():
                        usehdr['NITE'] = decaminfo.get_nite(hdrin['DATE-OBS'])
                    else:
                        logger.error('No NITE or DATE-OBS in ' + filename)
                        return 1
                    if 'FLATFIL' in hdrin.keys():
                        usehdr['FLATFIL'] = hdrin['FLATFIL']
                    else:
                        logger.error('No FLATFIL in ' + filename)
                        return 1
                    if 'CCDNUM' in hdrin.keys():
                        usehdr['CCDNUM'] = hdrin['CCDNUM']
                    else:
                        logger.error('No CCDNUM in ' + filename)
                        return 1
                    if not hdr:
                        # First exposure will establish values for the output
                        hdr['BAND'] = usehdr['BAND']
                        hdr['MINNITE'] = usehdr['NITE']
                        hdr['MAXNITE'] = usehdr['NITE']
                        hdr['CCDNUM'] = usehdr['CCDNUM']
                        if hdr['CCDNUM'] != ccdnum:
                            logger.error('Wrong ccdnum {:d} in {:s}'.format(ccdnum, filename))
                        hdr['FLATFIL'] = usehdr['FLATFIL']
                    else:
                        # Check that this exposure matches the others
                        try:
                            items_must_match(hdr, usehdr, 'BAND', 'CCDNUM', 'FLATFIL')
                        except Exception:
                            return 1
                        hdr['MINNITE'] = min(hdr['MINNITE'], usehdr['NITE'])
                        hdr['MAXNITE'] = max(hdr['MAXNITE'], usehdr['NITE'])

        data /= norms[:, np.newaxis, np.newaxis]  # Apply norms to be near unity

        # Now cycle through all blocks
        for jb in range((yStop - yStart) // pc.blocksize):
            for ib in range(xSize // pc.blocksize):
                logger.debug('Fitting for block ({:d},{:d})'.format(
                    jb + yStart // pc.blocksize, ib))
                if ccdnum == decaminfo.ccdnums['S7'] and \
                   pc.halfS7 and \
                   ib >= xSize // pc.blocksize // 2:
                    # If we are looking at the bad amp of S7, we'll just
                    # store the median of the normalized images in PC0.
                    # The other PCs stay at zero.
                    out[0,
                        yStart + jb * pc.blocksize: yStart + (jb + 1) * pc.blocksize,
                        ib * pc.blocksize: (ib + 1) * pc.blocksize] = \
                        np.median(data[:,
                                       jb * pc.blocksize: (jb + 1) * pc.blocksize,
                                       ib * pc.blocksize: (ib + 1) * pc.blocksize],
                                  axis=0)
                    continue

                # Use the PCA of this block as the starting guess at the solution
                index = mini.index_of(detpos, yStart // pc.blocksize + jb, ib)
                guess = np.array(pc.U[index, :])

                # Extract the data for this block into an (nexp, npix) array
                block = np.array(data[:,
                                      jb * pc.blocksize:(jb + 1) * pc.blocksize,
                                      ib * pc.blocksize:(ib + 1) * pc.blocksize])
                block.resize(nexp, pc.blocksize * pc.blocksize)
                bmask = np.array(mask[:,
                                      jb * pc.blocksize:(jb + 1) * pc.blocksize,
                                      ib * pc.blocksize:(ib + 1) * pc.blocksize])
                bmask.resize(nexp, pc.blocksize * pc.blocksize)

                # We'll scale the guess in each pixel by the typical ratio
                # of this pixel's data to the PCA model for the block, and
                # also estimate the noise as the dispersion about this guess
                model = np.dot(guess, V)
                ratio = block / model[:, np.newaxis]
                scale, var, n = clippedMean(ratio, 4, axis=0)
                clip = 3. * np.sqrt(var.data) * scale.data

                # The first guess at the solution is the outer product of the
                # superblock PCA with the scaling per pixel
                soln = guess[:, np.newaxis] * scale.data
                del scale, var, ratio, n

                # Linear solution with clipping iteration
                MAX_ITERATIONS = 20
                TOLERANCE = 0.0001
                for i in range(MAX_ITERATIONS):
                    model = np.dot(V.T, soln)
                    # Residuals from the model are used to clip
                    resid = block - model
                    # Find clipped points and masked ones
                    good = np.logical_and(resid < clip, resid > -clip)
                    good = np.logical_and(good, bmask)
                    # Set residuals to zero at bad pixels
                    resid[~good] = 0.
                    # Get the shift in the linear solution from the residuals:
                    dsoln = np.dot(ainv, np.dot(V, resid))
                    soln += dsoln
                    # Calculate the largest change in the model as the
                    # convergence criterion
                    shift = np.max(np.abs(np.dot(V.T, dsoln)))
                    logger.debug('Iteration {:d}, model shift {:f}'.format(i, shift))
                    if shift < TOLERANCE:
                        break
                    if i == MAX_ITERATIONS - 1:
                        nonConvergentBlocks = nonConvergentBlocks + 1

                # Save the results into the big matrices
                soln.resize(npc, pc.blocksize, pc.blocksize)
                out[:,
                    yStart + jb * pc.blocksize:yStart + (jb + 1) * pc.blocksize,
                    ib * pc.blocksize:(ib + 1) * pc.blocksize] = soln
                if good_filename is not None:
                    # Gin up a masked array because it allows counting along an axis
                    nblock = np.ma.count_masked(
                        np.ma.masked_array(np.zeros_like(good), good), axis=0)
                    nblock.resize(pc.blocksize, pc.blocksize)
                    ngood[yStart + jb * pc.blocksize:yStart + (jb + 1) * pc.blocksize,
                          ib * pc.blocksize:(ib + 1) * pc.blocksize] = nblock
                    del nblock
                del resid, model, good, dsoln, block

        del data

    if nonConvergentBlocks > 0:
        logger.warning('Clipping did not converge for {:d} blocks out of {:d}'.format(
            nonConvergentBlocks, xBlocks * (ySize // pc.blocksize)))

    # Add a history line about creation here
    hdr['HISTORY'] = time.asctime(time.localtime()) + \
        ' Build sky template from PCA file {:s}'.format(path.basename(in_filename))

    # Save the template into the output file
    spc = skyinfo.SkyPC(out, detpos, header=hdr)
    spc.save(out_filename)
    del out

    # Save the number of good sky pixels in another extension
    if good_filename is not None:
        gimg = DESDataImage(ngood, header={'DETPOS': detpos, 'CCDNUM': ccdnum})
        logger.debug('Writing ngood to ' + good_filename)
        gimg.save(good_filename)
        del gimg, ngood

    logger.debug('Finished sky template')
    ret_code = 0
    return ret_code
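# Illustrative sketch (not an actual pipeline routine): the per-block fit
# above is an iteratively reclipped linear least-squares solve. With the
# (npc, nexp) coefficient matrix V and an (nexp, npix) data block, the
# normal-equations solution is (V V^T)^{-1} V block; each iteration zeroes
# the residuals of clipped or masked points and applies the same update as
# a shift to the current solution. A compact synthetic demonstration of
# that scheme; all names and values are made up.
def _clipped_fit_example():
    import numpy as np
    rng = np.random.default_rng(1)
    npc, nexp, npix = 2, 20, 50
    V = rng.normal(size=(npc, nexp))        # per-exposure PC coefficients
    truth = rng.normal(size=(npc, npix))    # true template values
    block = np.dot(V.T, truth) + 0.01 * rng.normal(size=(nexp, npix))
    block[3, 5] += 100.0                    # one gross outlier to be clipped
    ainv = np.linalg.inv(np.dot(V, V.T))
    soln = np.dot(ainv, np.dot(V, block))   # unclipped starting solution
    clip = 0.05                             # ~5 sigma for the noise above
    for _ in range(20):
        resid = block - np.dot(V.T, soln)
        resid[np.abs(resid) > clip] = 0.0   # drop clipped points from the update
        dsoln = np.dot(ainv, np.dot(V, resid))
        soln += dsoln
        if np.max(np.abs(np.dot(V.T, dsoln))) < 1e-4:
            break
    return soln  # approaches `truth` despite the outlier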
def _doit(cls, image, flat_im):
    """Apply a flat field correction to an image - used for both dome
    and star flats.

    :Parameters:
        - `image`: the DESImage to apply a flat correction to
        - `flat_im`: the flat correction image to apply

    Applies the correction "in place"
    """
    logger.info('Applying Flat')
    # Check that the flat and data are from the same CCD and filter
    try:
        image['BAND']
    except KeyError:
        # Give the image a BAND from its FILTER if it's not there
        image['BAND'] = decaminfo.get_band(image['FILTER'])
    try:
        items_must_match(image, flat_im, 'CCDNUM', 'BAND')
    except Exception:
        return 1

    # Apply the flat to the data
    image.data /= flat_im.data

    # Update the variance or weight image if it exists
    if image.weight is not None:
        image.weight *= flat_im.data * flat_im.data
    if image.variance is not None:
        image.variance /= flat_im.data * flat_im.data

    # If a mask image exists, mark as BADPIX_BPM any pixels that have a
    # non-positive flat and are not already flagged.
    if image.mask is not None:
        # Find flat-field pixels that are invalid but not already bad for
        # one of these reasons:
        badmask = maskbits.BADPIX_BPM +\
            maskbits.BADPIX_BADAMP +\
            maskbits.BADPIX_EDGE
        badflat = np.logical_and(flat_im.data <= 0.,
                                 (image.mask & badmask) == 0)
        image.mask[badflat] |= maskbits.BADPIX_BPM

    # If a weight or variance image already exists, add to it any additional
    # variance from the flat:
    if image.weight is not None or image.variance is not None:
        if flat_im.weight is not None:
            var = image.get_variance()
            f2 = flat_im.data * flat_im.data
            var *= f2
            var += image.data * image.data / (flat_im.weight * f2)
        elif flat_im.variance is not None:
            var = image.get_variance()
            f2 = flat_im.data * flat_im.data
            var *= f2
            var += image.data * image.data * flat_im.variance / f2

    # Update header keywords for rescaling
    saturate = 0.
    scales = []
    for amp in decaminfo.amps:
        # Acquire the typical scaling factor for each amp from the flat
        scalekw = 'FLATMED' + amp
        if scalekw in flat_im.header.keys():
            # Already stored in the flat's header:
            scale = flat_im[scalekw]
        else:
            # Figure it out ourselves from the median of a subsample:
            # sec = DESImage.section2slice(image['DATASEC' + amp])
            sec = section2slice(image['DATASEC' + amp])
            scale = np.median(flat_im.data[sec][::4, ::4])
        scales.append(scale)
        if scalekw in image.header.keys():
            # Add the current scaling to any previous ones
            image[scalekw] = image[scalekw] * scale
        else:
            image[scalekw] = scale
        image['GAIN' + amp] = image['GAIN' + amp] * scale
        image['SATURAT' + amp] = image['SATURAT' + amp] / scale
        # Scale the SKYVAR if it's already here
        kw = 'SKYVAR' + amp
        if kw in image.header.keys():
            image[kw] = image[kw] / (scale * scale)
        saturate = max(saturate, image['SATURAT' + amp])
    # The SATURATE keyword is assigned the maximum of the amps' values.
    image['SATURATE'] = saturate

    # Some other keywords that we will adjust crudely with the mean rescaling
    # if they are present:
    scale = np.mean(scales)
    for kw in ('SKYBRITE', 'SKYSIGMA'):
        if kw in image.header.keys():
            image[kw] = image[kw] / scale

    logger.debug('Finished applying Flat')
    ret_code = 0
    return ret_code
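# Illustrative sketch (not an actual pipeline routine): dividing an image by a
# flat whose typical level for an amp is `scale` rescales the pixel values by
# 1/scale, so ADU-based header quantities must follow suit, as in the loop
# above: GAIN (e-/ADU) is multiplied by scale, SATURATE (ADU) is divided by
# scale, and SKYVAR (ADU^2) is divided by scale^2. Made-up numbers:
def _flat_rescale_example():
    scale = 0.8                              # median flat level for this amp
    gain, saturate, skyvar = 4.0, 65000.0, 210.0
    gain *= scale                            # 3.2 e-/ADU after flattening
    saturate /= scale                        # 81250.0 ADU
    skyvar /= scale * scale                  # 328.125 ADU^2
    return gain, saturate, skyvar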
try:
    os.mkdir('CATS')
except OSError:
    print("CATS directory couldn't be created, continuing")

# Move to the working directory
startDir = os.getcwd()
scratchDir = getScratchDir('{:>08d}'.format(expid))
os.makedirs(scratchDir)
os.chdir(scratchDir)

# Link the raw image into the scratch directory
d = {'expid': expid}
raw = raw_file.format(**d)
os.symlink(os.path.join(startDir, 'RAW/', raw), raw)

# Get the band name from the FILTER keyword
band = decaminfo.get_band(fitsio.read_header(raw, 0)['FILTER'])
d['band'] = band

# Do detrending
detrend(**d)

# Do bleed/star masking
for ccd in range(1, 63):
    if ccd not in (2, 61):
        d['ccd'] = ccd
        bleed(**d)
        # Subtract sky, create weights
        skysubtract(**d)

# Produce star catalogs
for ccd in range(1, 63):