def _identify_crr(self, in_img, blot_img, blotder_img, exptime, sky_val):
    """Identify cosmic rays and other deviant pixels.

    The code was taken from multidrizzle.DrizCR. Small adjustments
    and re-factoring were done.
    """
    # create an empty output mask
    __crMask = numpy.zeros(in_img.shape, dtype=numpy.uint8)

    # Part 1 of computation: flag the central pixels
    # Create a temp array mask
    __t1 = numpy.absolute(in_img - blot_img)
    __ta = numpy.sqrt(numpy.absolute(blot_img * exptime + sky_val * exptime) +
                      self.rdnoise * self.rdnoise)
    __t2 = self.driz_cr_scale[0] * blotder_img + self.driz_cr_snr[0] * __ta / exptime
    __tmp1 = numpy.logical_not(numpy.greater(__t1, __t2))

    # mop up
    del __ta
    del __t1
    del __t2

    # Create a convolution kernel that is 3 x 3 of 1's
    __kernel = numpy.ones((3, 3), dtype=numpy.uint8)
    # Create an output tmp array the same size as the input temp mask array
    __tmp2 = numpy.zeros(__tmp1.shape, dtype=numpy.int16)

    # Convolve the mask with the kernel
    convolve.convolve2d(__tmp1, __kernel, output=__tmp2, fft=0,
                        mode='nearest', cval=0)
    del __kernel
    del __tmp1

    # Part 2 of computation: flag the neighboring pixels
    # Create the CR Mask
    __xt1 = numpy.absolute(in_img - blot_img)
    __xta = numpy.sqrt(numpy.absolute(blot_img * exptime + sky_val * exptime) +
                       self.rdnoise * self.rdnoise)
    __xt2 = self.driz_cr_scale[1] * blotder_img + self.driz_cr_snr[1] * __xta / exptime

    # It is necessary to use a bitwise 'and' to create the mask with
    # numarray objects.
    __crMask = numpy.logical_not(numpy.greater(__xt1, __xt2) &
                                 numpy.less(__tmp2, 9))
    del __xta
    del __xt1
    del __xt2
    del __tmp2

    # Part 3 of computation: flag additional CTE 'radial' and 'tail' pixels
    # surrounding CR pixels as CRs.
    # In both the 'radial' and 'length' kernels below, 0->good and 1->bad, so
    # that upon convolving the kernels with __crMask, the convolution output
    # will have low->bad and high->good, from which 2 new arrays are created
    # having 0->bad and 1->good. These 2 new arrays are then 'anded' to
    # create a new __crMask.

    # recast __crMask to int for manipulations below;
    # will recast to Bool at end
    __crMask_orig_bool = __crMask.copy()
    __crMask = __crMask_orig_bool.astype(numpy.int8)

    # make radial convolution kernel and convolve it with original __crMask
    # kernel for radial masking of CR pixel
    cr_grow_kernel = numpy.ones((self.driz_cr_grow, self.driz_cr_grow))
    cr_grow_kernel_conv = __crMask.copy()   # for output of convolution
    convolve.convolve2d(__crMask, cr_grow_kernel, output=cr_grow_kernel_conv)

    # make tail convolution kernel and convolve it with original __crMask
    # kernel for tail masking of CR pixel
    cr_ctegrow_kernel = numpy.zeros((2 * self.driz_cr_ctegrow + 1,
                                     2 * self.driz_cr_ctegrow + 1))
    cr_ctegrow_kernel_conv = __crMask.copy()    # for output convolution

    # which pixels are masked by the tail kernel depends on the sign of
    # ctedir (i.e., readout direction):
    ctedir = 0
    ctegrow = self.driz_cr_ctegrow
    if ctedir == 1:    # HRC: amp C or D ; WFC: chip = sci,1 ; WFPC2
        cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1    # 'positive' direction
    if ctedir == -1:   # HRC: amp A or B ; WFC: chip = sci,2
        cr_ctegrow_kernel[ctegrow + 1:2 * ctegrow + 1, ctegrow] = 1    # 'negative' direction
    if ctedir == 0:    # NICMOS: no cte tail correction
        pass

    # do the convolution
    convolve.convolve2d(__crMask, cr_ctegrow_kernel, output=cr_ctegrow_kernel_conv)

    # select high pixels from both convolution outputs;
    # then 'and' them to create the new __crMask
    where_cr_grow_kernel_conv = numpy.where(
        cr_grow_kernel_conv < self.driz_cr_grow * self.driz_cr_grow, 0, 1)   # radial
    where_cr_ctegrow_kernel_conv = numpy.where(
        cr_ctegrow_kernel_conv < self.driz_cr_ctegrow, 0, 1)                 # length
    __crMask = numpy.logical_and(where_cr_ctegrow_kernel_conv,
                                 where_cr_grow_kernel_conv)   # combine masks
    __crMask = __crMask.astype(numpy.uint8)   # cast back to Bool

    del __crMask_orig_bool
    del cr_grow_kernel
    del cr_grow_kernel_conv
    del cr_ctegrow_kernel
    del cr_ctegrow_kernel_conv
    del where_cr_grow_kernel_conv
    del where_cr_ctegrow_kernel_conv

    # return the result
    return __crMask
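
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the Part 1 test above
# flags a pixel as a cosmic ray when |data - blot| exceeds
# scale * derivative + SNR * noise / exptime. The numbers below (exposure
# time, sky, read noise, scale/SNR thresholds) are made-up values chosen only
# to show the thresholding on a tiny synthetic array.
import numpy as np

exptime, sky_val, rdnoise = 500.0, 10.0, 5.0
scale0, snr0 = 1.2, 3.5                       # stand-ins for driz_cr_scale[0], driz_cr_snr[0]
blot_img = np.full((5, 5), 100.0)             # blotted model image
blotder_img = np.full((5, 5), 0.5)            # derivative of the model
in_img = blot_img.copy()
in_img[2, 2] += 50.0                          # injected cosmic-ray hit

noise = np.sqrt(np.abs(blot_img * exptime + sky_val * exptime) + rdnoise ** 2)
cutoff = scale0 * blotder_img + snr0 * noise / exptime
good = np.abs(in_img - blot_img) <= cutoff    # analogue of __tmp1 above
print(np.argwhere(~good))                     # -> [[2 2]]: only the injected hit is flagged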
def convolve_helper(data, kernel, method='fftconvolve', fill_scipy=False, cval=0.0):
    """
    Handle 2D convolution methods

    Parameters
    ==========
    method: str
        'fftconvolve': ``scipy.signal.fftconvolve(data, kernel, mode='same')``

        'oaconvolve': ``scipy.signal.oaconvolve(data, kernel, mode='same')``

        'stsci': ``stsci.convolve.convolve2d(data, kernel, fft=1, mode='constant', cval=cval)``

        'xstsci': Try ``stsci`` but fall back to ``fftconvolve`` if
        ``import stsci.convolve`` fails.

    If ``fill_scipy=True`` or ``method='stsci'``, the ``data`` array will be
    expanded to include the kernel size and padded with values given by ``cval``.
    """
    if method == 'xstsci':
        try:
            from stsci.convolve import convolve2d
            method = 'stsci'
        except ImportError:
            print('import stsci.convolve failed. Fall back to fftconvolve.')
            method = 'fftconvolve'

    if method in ['oaconvolve', 'fftconvolve']:
        from scipy.signal import fftconvolve, oaconvolve

        if method == 'fftconvolve':
            convolve_func = fftconvolve
        else:
            convolve_func = oaconvolve

        if fill_scipy:
            # Pad the array with `cval` by one kernel size on each side.
            sh = data.shape
            shk = kernel.shape
            _data = np.zeros((sh[0] + 2 * shk[0], sh[1] + 2 * shk[1])) + cval
            _data[shk[0]:-shk[0], shk[1]:-shk[1]] = data
        else:
            _data = data

        conv = convolve_func(_data, kernel, mode='same')
        if fill_scipy:
            # Crop back to the original shape.
            conv = conv[shk[0]:-shk[0], shk[1]:-shk[1]]

    elif method == 'stsci':
        from stsci.convolve import convolve2d
        conv = convolve2d(data, kernel, mode='constant', cval=cval, fft=1)

    else:
        raise ValueError("Valid options for `method` are 'fftconvolve', "
                         "'oaconvolve', 'stsci' ('xstsci').")

    return conv
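
# ---------------------------------------------------------------------------
# Usage sketch (illustrative): smooth a small image with a normalized 5x5 box
# kernel. With method='xstsci' the call quietly falls back to scipy's
# fftconvolve when stsci.convolve is not installed, so the same line works in
# either environment; fill_scipy=True pads the array with ``cval`` before
# convolving and crops the result back to the original shape.
import numpy as np

rng = np.random.default_rng(42)
img = rng.normal(size=(64, 64))
box = np.ones((5, 5)) / 25.0

smoothed = convolve_helper(img, box, method='xstsci', fill_scipy=True, cval=0.0)
print(smoothed.shape)   # (64, 64): same shape as the input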
def findstars(jdata, fwhm, threshold, skymode,
              peakmin=None, peakmax=None, fluxmin=None, fluxmax=None,
              nsigma=1.5, ratio=1.0, theta=0.0,
              use_sharp_round=False, mask=None,
              sharplo=0.2, sharphi=1.0, roundlo=-1.0, roundhi=1.0):
    # store input image size:
    (img_ny, img_nx) = jdata.shape

    # Define convolution inputs
    nx, ny, a, b, c, f = gausspars(fwhm, nsigma=nsigma, ratio=ratio, theta=theta)
    xc = nx // 2
    yc = ny // 2
    yin, xin = np.mgrid[0:ny, 0:nx]
    kernel = gaussian1(1.0, xc, yc, a, b, c)(xin, yin)

    # define size of extraction box for each source based on kernel size
    grx = xc
    gry = yc

    # DAOFIND STYLE KERNEL "SHAPE"
    rmat = np.sqrt((xin - xc)**2 + (yin - yc)**2)
    rmatell = a * (xin - xc)**2 + b * (xin - xc) * (yin - yc) + c * (yin - yc)**2
    xyrmask = np.where((rmatell <= 2 * f) | (rmat <= 2.001), 1, 0).astype(np.int16)
    # Previous *style* computation for kernel "shape":
    #xyrmask = np.where(rmat <= max(grx,gry),1,0).astype(np.int16)

    npts = xyrmask.sum()
    rmask = kernel * xyrmask
    denom = (rmask * rmask).sum() - rmask.sum()**2 / npts
    # normalize kernel to preserve fluxes for thresholds
    nkern = (rmask - (rmask.sum() / npts)) / denom
    nkern *= xyrmask

    # initialize values used for getting source centers
    relerr = 1. / ((rmask**2).sum() - (rmask.sum()**2 / xyrmask.sum()))
    xsigsq = (fwhm / fwhm2sig)**2
    ysigsq = (ratio**2) * xsigsq

    # convolve image with gaussian kernel
    convdata = convolve.convolve2d(jdata, nkern).astype(np.float32)

    # clip image to create regions around each source for segmentation
    if mask is None:
        #tdata=np.where(convdata > skymode*2.0, convdata, 0)
        tdata = np.where(convdata > threshold, convdata, 0)
    else:
        tdata = np.where((convdata > threshold) & mask, convdata, 0)

    # segment image and find sources
    s = ndim.generate_binary_structure(2, 2)
    ldata, nobj = ndim.label(tdata, structure=s)
    fobjects = ndim.find_objects(ldata)
    #print 'Number of potential sources: ',nobj

    fluxes = []
    fitind = []
    if nobj < 2:
        print('No objects found for this image. Please check value of "threshold".')
        return fitind, fluxes

    # determine center of each source, while removing spurious sources or
    # applying limits defined by the user
    ninit = 0
    ninit2 = 0
    #minxx = grx * 2 + 1
    #minxy = gry * 2 + 1

    s2m, s4m = precompute_sharp_round(nx, ny, xc, yc)

    satur = False   # Default assumption if use_sharp_round=False
    sharp = None
    round1 = None
    round2 = None

    for ss, n in zip(fobjects, range(len(fobjects))):
        ssx = ss[1].stop - ss[1].start
        ssy = ss[0].stop - ss[0].start
        if ssx >= tdata.shape[1] - 1 or ssy >= tdata.shape[0] - 1:
            continue

        yr0 = ss[0].start - gry
        yr1 = ss[0].stop + gry + 1
        if yr0 <= 0 or yr1 >= img_ny:
            continue   # ignore sources within ny//2 of edge
        #if yr0 <= 0: yr0 = 0
        #if yr1 >= jdata.shape[0]: yr1 = jdata.shape[0]

        xr0 = ss[1].start - grx
        xr1 = ss[1].stop + grx + 1
        if xr0 <= 0 or xr1 >= img_nx:
            continue   # ignore sources within nx//2 of edge
        #if xr0 <= 0: xr0 = 0
        #if xr1 >= jdata.shape[1]: xr1 = jdata.shape[1]

        ssnew = (slice(yr0, yr1), slice(xr0, xr1))
        region = tdata[ssnew]
        #if region.shape[0] < minxy or region.shape[1] < minxy:
        #    continue

        cntr = centroid(region)

        # Define region centered on max value in object (slice)
        # This region will be bounds-checked to insure that it only accesses
        # a valid section of the image (not off the edge)
        maxpos = (int(cntr[1] + 0.5) + ssnew[0].start,
                  int(cntr[0] + 0.5) + ssnew[1].start)

        yr0 = maxpos[0] - gry
        yr1 = maxpos[0] + gry + 1
        if yr0 < 0 or yr1 > img_ny:
            continue

        xr0 = maxpos[1] - grx
        xr1 = maxpos[1] + grx + 1
        if xr0 < 0 or xr1 > img_nx:
            continue

        #ninit += 1
        # Simple Centroid on the region from the input image
        jregion = jdata[yr0:yr1, xr0:xr1]

        src_flux = jregion.sum()
        src_peak = jregion.max()

        if (peakmax is not None and src_peak >= peakmax):
            continue
        if (peakmin is not None and src_peak <= peakmin):
            continue
        if fluxmin and src_flux <= fluxmin:
            continue
        if fluxmax and src_flux >= fluxmax:
            continue

        #ninit2 += 1
        datamin = jregion.min()
        datamax = jregion.max()

        if use_sharp_round:
            # Compute sharpness and first estimate of roundness:
            dregion = convdata[yr0:yr1, xr0:xr1]
            satur, round1, sharp = \
                sharp_round(jregion, dregion, xyrmask, xc, yc,
                            s2m, s4m, nx, ny, datamin, datamax)
            # Filter sources:
            if sharp is None or (sharp < sharplo or sharp > sharphi):
                continue
            if round1 is None or (round1 < roundlo or round1 > roundhi):
                continue

        px, py, round2 = xy_round(jregion, grx, gry, skymode,
                                  kernel, xsigsq, ysigsq, datamin, datamax)

        # Filter sources:
        if px is None:
            continue
        if use_sharp_round:
            if not satur and \
               (round2 is None or round2 < roundlo or round2 > roundhi):
                continue

        fitind.append((px + xr0, py + yr0, sharp, round1, round2))
        # compute a source flux value
        fluxes.append(src_flux)

    fitindc, fluxesc = apply_nsigma_separation(fitind, fluxes, fwhm * nsigma / 2)
    #print 'ninit: ',ninit,' ninit2: ',ninit2,' final n: ',len(fitind)

    return fitindc, fluxesc
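
# ---------------------------------------------------------------------------
# Usage sketch (illustrative): call findstars on a synthetic frame. This
# assumes the surrounding module provides the helpers referenced above
# (gausspars, gaussian1, centroid, precompute_sharp_round, xy_round,
# apply_nsigma_separation, the ``convolve``/``ndim`` imports and the
# ``fwhm2sig`` constant); the image, sky level and threshold below are
# made-up values.
import numpy as np

rng = np.random.default_rng(0)
image = rng.normal(loc=100.0, scale=5.0, size=(256, 256)).astype(np.float32)
for x, y in [(50, 60), (120, 200), (180, 80)]:    # drop in fake point sources
    image[y - 1:y + 2, x - 1:x + 2] += 500.0

skymode = 100.0
sources, fluxes = findstars(image, fwhm=2.5, threshold=5 * 5.0,
                            skymode=skymode, use_sharp_round=False)
for x, y, *_ in sources:        # entries are (x, y, sharp, round1, round2)
    print("source near (%.1f, %.1f)" % (x, y))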
def _drizCr(sciImage, virtual_outputs, paramDict):
    """mask blemishes in dithered data by comparison of an image
    with a model image and the derivative of the model image.

    sciImage is an imageObject which contains the science data
    blotImage is inferred from the sciImage object here which knows the
        name of its blotted image :)
    chip should be the science chip that corresponds to the blotted image
        that was sent
    paramDict contains the user parameters derived from the full configObj
        instance
    dgMask is inferred from the sciImage object, the name of the mask file
        to combine with the generated Cosmic ray mask

    here are the options you can override in configObj

    gain     = 7          # Detector gain, e-/ADU
    grow     = 1          # Radius around CR pixel to mask
                          # [default=1 for 3x3 for non-NICMOS]
    ctegrow  = 0          # Length of CTE correction to be applied
    rn       = 5          # Read noise in electrons
    snr      = "4.0 3.0"  # Signal-to-noise ratio
    scale    = "0.5 0.4"  # scaling factor applied to the derivative
    backg    = 0          # Background value
    expkey   = "exptime"  # exposure time keyword

    blot images are saved out to simple fits files with 1 chip in them
    so for example in ACS, there will be 1 image file with 2 chips that is
    the original image and 2 blotted image files, each with 1 chip

    so I'm imagining calling this function twice, once for each chip,
    but both times with the same original science image file, output files
    and some input (output from previous steps) are referenced in the
    imageobject itself
    """
    grow = paramDict["driz_cr_grow"]
    ctegrow = paramDict["driz_cr_ctegrow"]

#    try:
#        assert(chip is not None), 'Please specify a chip to process for blotting'
#        assert(sciImage is not None), 'Please specify a science image object for blotting'
#    except AssertionError:
#        print "Problem with value of chip or sciImage to drizCR"
#        print sciImage
#        raise # raise orig error

    crcorr_list = []
    crMaskDict = {}

    for chip in range(1, sciImage._numchips + 1, 1):
        exten = sciImage.scienceExt + ',' + str(chip)
        scienceChip = sciImage[exten]

        if scienceChip.group_member:
            blotImagePar = 'blotImage'
            blotImageName = scienceChip.outputNames[blotImagePar]
            if sciImage.inmemory:
                __blotImage = sciImage.virtualOutputs[blotImageName]
            else:
                try:
                    os.access(blotImageName, os.F_OK)
                except IOError:
                    print("Could not find the Blotted image on disk:", blotImageName)
                    raise  # raise orig error

                try:
                    __blotImage = fits.open(blotImageName, mode="readonly",
                                            memmap=False)
                except IOError:
                    print("Problem opening blot images")
                    raise

            #blotImageName=scienceChip.outputNames["blotImage"] # input file
            crMaskImage = scienceChip.outputNames["crmaskImage"]  # output file
            ctedir = scienceChip.cte_dir

            #check that sciImage and blotImage are the same size?

            #grab the actual image from disk
            __inputImage = sciImage.getData(exten)

            # Apply any unit conversions to input image here for comparison
            # with blotted image in units of electrons
            __inputImage *= scienceChip._conversionFactor

            #make the derivative blot image
            __blotData = __blotImage[0].data * scienceChip._conversionFactor  # simple fits
            __blotDeriv = quickDeriv.qderiv(__blotData)
            if not sciImage.inmemory:
                __blotImage.close()

            #this grabs the original dq mask from the science image
            # This mask needs to take into account any crbits values
            # specified by the user to be ignored. A call to the
            # buildMask() method may work better here...
            #__dq = sciImage.maskExt + ',' + str(chip)
            #__dqMask=sciImage.getData(__dq)
            __dqMask = sciImage.buildMask(chip, paramDict['crbit'])  # both args are ints

            #parse out the SNR information
            __SNRList = (paramDict["driz_cr_snr"]).split()
            __snr1 = float(__SNRList[0])
            __snr2 = float(__SNRList[1])

            #parse out the scaling information
            __scaleList = (paramDict["driz_cr_scale"]).split()
            __mult1 = float(__scaleList[0])
            __mult2 = float(__scaleList[1])

            __gain = scienceChip._effGain
            __rn = scienceChip._rdnoise
            __backg = scienceChip.subtractedSky * scienceChip._conversionFactor

            # Define output cosmic ray mask to populate
            __crMask = np.zeros(__inputImage.shape, dtype=np.uint8)

            # Set scaling factor (used by MultiDrizzle) to 1 since scaling has
            # already been accounted for in blotted image
            __expmult = 1.

            ################## COMPUTATION PART I ###################
            # Create a temporary array mask
            __t1 = np.absolute(__inputImage - __blotData)
            __ta = np.sqrt(__gain * np.absolute(__blotData * __expmult +
                                                __backg * __expmult) + __rn * __rn)
            __tb = (__mult1 * __blotDeriv + __snr1 * __ta / __gain)
            del __ta
            __t2 = __tb / __expmult
            del __tb
            __tmp1 = np.logical_not(np.greater(__t1, __t2))
            del __t1
            del __t2

            # Create a convolution kernel that is 3 x 3 of 1's
            __kernel = np.ones((3, 3), dtype=np.uint8)
            # Create an output tmp array the same size as the input temp mask array
            __tmp2 = np.zeros(__tmp1.shape, dtype=np.int16)

            # Convolve the mask with the kernel
            NC.convolve2d(__tmp1, __kernel, output=__tmp2, fft=0,
                          mode='nearest', cval=0)
            del __kernel
            del __tmp1

            ################## COMPUTATION PART II ###################
            # Create the CR Mask
            __xt1 = np.absolute(__inputImage - __blotData)
            __xta = np.sqrt(__gain * np.absolute(__blotData * __expmult +
                                                 __backg * __expmult) + __rn * __rn)
            __xtb = (__mult2 * __blotDeriv + __snr2 * __xta / __gain)
            del __xta
            __xt2 = __xtb / __expmult
            del __xtb

            # It is necessary to use a bitwise 'and' to create the mask with
            # numarray objects.
            __crMask = np.logical_not(np.greater(__xt1, __xt2) & np.less(__tmp2, 9))
            del __xt1
            del __xt2
            del __tmp2

            ################## COMPUTATION PART III ###################
            # flag additional cte 'radial' and 'tail' pixels surrounding CR
            # pixels as CRs.
            # In both the 'radial' and 'length' kernels below, 0->good and
            # 1->bad, so that upon convolving the kernels with __crMask, the
            # convolution output will have low->bad and high->good, from which
            # 2 new arrays are created having 0->bad and 1->good. These 2 new
            # arrays are then 'anded' to create a new __crMask.

            # recast __crMask to int for manipulations below;
            # will recast to Bool at end
            __crMask_orig_bool = __crMask.copy()
            __crMask = __crMask_orig_bool.astype(np.int8)

            # make radial convolution kernel and convolve it with original __crMask
            cr_grow_kernel = np.ones((grow, grow))  # kernel for radial masking of CR pixel
            cr_grow_kernel_conv = __crMask.copy()   # for output of convolution
            NC.convolve2d(__crMask, cr_grow_kernel, output=cr_grow_kernel_conv)

            # make tail convolution kernel and convolve it with original __crMask
            cr_ctegrow_kernel = np.zeros((2 * ctegrow + 1, 2 * ctegrow + 1))  # kernel for tail masking of CR pixel
            cr_ctegrow_kernel_conv = __crMask.copy()  # for output convolution

            # which pixels are masked by tail kernel depends on sign of
            # ctedir (i.e., readout direction):
            if (ctedir == 1):   # HRC: amp C or D ; WFC: chip = sci,1 ; WFPC2
                cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1    # 'positive' direction
            if (ctedir == -1):  # HRC: amp A or B ; WFC: chip = sci,2
                cr_ctegrow_kernel[ctegrow + 1:2 * ctegrow + 1, ctegrow] = 1  # 'negative' direction
            if (ctedir == 0):   # NICMOS: no cte tail correction
                pass

            # do the convolution
            NC.convolve2d(__crMask, cr_ctegrow_kernel, output=cr_ctegrow_kernel_conv)

            # select high pixels from both convolution outputs;
            # then 'and' them to create new __crMask
            where_cr_grow_kernel_conv = np.where(cr_grow_kernel_conv < grow * grow, 0, 1)    # radial
            where_cr_ctegrow_kernel_conv = np.where(cr_ctegrow_kernel_conv < ctegrow, 0, 1)  # length
            __crMask = np.logical_and(where_cr_ctegrow_kernel_conv,
                                      where_cr_grow_kernel_conv)  # combine masks
            __crMask = __crMask.astype(np.uint8)  # cast back to Bool

            del __crMask_orig_bool
            del cr_grow_kernel
            del cr_grow_kernel_conv
            del cr_ctegrow_kernel
            del cr_ctegrow_kernel_conv
            del where_cr_grow_kernel_conv
            del where_cr_ctegrow_kernel_conv

            # Apply CR mask to the DQ array in place
            np.bitwise_and(__dqMask, __crMask, __dqMask)

            ####### Create the corr file
            __corrFile = np.zeros(__inputImage.shape, dtype=__inputImage.dtype)
            __corrFile = np.where(np.equal(__dqMask, 0), __blotData, __inputImage)
            __corrDQMask = np.where(np.equal(__dqMask, 0),
                                    paramDict['crbit'], 0).astype(np.uint16)

            if paramDict['driz_cr_corr']:
                crcorr_list.append({'sciext': fileutil.parseExtn(exten),
                                    'corrFile': __corrFile.copy(),
                                    'dqext': fileutil.parseExtn(scienceChip.dq_extn),
                                    'dqMask': __corrDQMask.copy()})

            ######## Save the cosmic ray mask file to disk
            _cr_file = np.zeros(__inputImage.shape, np.uint8)
            _cr_file = np.where(__crMask, 1, 0).astype(np.uint8)

            if not paramDict['inmemory']:
                outfile = crMaskImage
                # Always write out crmaskimage, as it is required input for
                # the final drizzle step. The final drizzle step combines this
                # image with the DQ information on-the-fly.
                #
                # Remove the existing mask file if it exists
                if (os.access(crMaskImage, os.F_OK)):
                    os.remove(crMaskImage)
                    print("Removed old cosmic ray mask file:", crMaskImage)
                print('Creating output : ', outfile)
            else:
                print('Creating in-memory(virtual) FITS file...')
                outfile = None

            _pf = util.createFile(_cr_file, outfile=outfile, header=None)
            if paramDict['inmemory']:
                crMaskDict[crMaskImage] = _pf

    if paramDict['driz_cr_corr']:
        #util.createFile(__corrFile,outfile=crCorImage,header=None)
        createCorrFile(sciImage.outputNames["crcorImage"], crcorr_list,
                       sciImage._filename)
    del crcorr_list
    if paramDict['inmemory']:
        sciImage.saveVirtualOutputs(crMaskDict)
        virtual_outputs = sciImage.virtualOutputs
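
# ---------------------------------------------------------------------------
# Illustrative paramDict (not from the pipeline): these are the keys the loop
# above reads. The snr/scale entries follow the docstring's two-element,
# space-separated string format; the crbit value and the other numbers are
# placeholders, not drizzlepac defaults.
paramDict = {
    "driz_cr_grow": 1,            # radius (pixels) of the radial grow kernel
    "driz_cr_ctegrow": 0,         # length (pixels) of the CTE tail masking
    "driz_cr_snr": "4.0 3.0",     # SNR cutoffs for computation parts I and II
    "driz_cr_scale": "0.5 0.4",   # derivative scalings for parts I and II
    "crbit": 4096,                # DQ bit used to mark CR-flagged pixels (placeholder)
    "driz_cr_corr": False,        # also build the CR-cleaned 'corr' image?
    "inmemory": False,            # keep outputs as virtual (in-memory) FITS
}

# The two-element strings are split exactly as in the code above:
snr1, snr2 = (float(v) for v in paramDict["driz_cr_snr"].split())
scale1, scale2 = (float(v) for v in paramDict["driz_cr_scale"].split())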
def detSources(image, outfile="", verbose=False, sigma=0.0, threshold=2.5,
               fwhm=5.5, sharplim=[0.2, 1.0], roundlim=[-1.0, 1.0],
               window=None, exts=None, timing=False, grid=False,
               rejection=None, ratio=None, drawWindows=False, dispFrame=1):
    """
    Performs similarly to, and is heavily influenced by, the source-detection
    algorithm 'find.pro'
    (http://idlastro.gsfc.nasa.gov/ftp/pro/idlphot/find.pro).
    'find.pro' was written by W. Landsman, STX, February 1987.

    This code was converted to Python, with areas re-written for optimization,
    by River Allen, Gemini Observatory, December 2009. [email protected]

    Sources:
    [1] - W. Landsman. http://idlastro.gsfc.nasa.gov/ftp/pro/idlphot/find.pro

    @param image: The filename of the fits file. It must be in the format
    N2.fits[1] for the specific extension. For example, if you want to find
    objects only in image extension [1], you would pass N2.fits[1].
    @type image: String

    @param outfile: The name of the file where the output will be written.
    By default output will not be written (i.e. if outfile is left as "",
    no output file is written).
    @type outfile: String

    @param verbose: Print out non-critical and debug information.
    @type verbose: Boolean

    @param sigma: The mean of the background value. If nothing is passed,
    detSources will run background() to determine it.
    @type sigma: Number

    @param threshold: "Threshold intensity for a point source - should
    generally be 3 or 4 sigma above background RMS"[1]. It was found that
    2.5 works best for IQ source detection.
    @type threshold: Number

    @param fwhm: "FWHM to be used in the convolve filter"[1]. This ends up
    playing a factor in determining the size of the kernel put through the
    gaussian convolve.
    @type fwhm: Number

    @param sharplim: "2 element vector giving low and high cutoff for the
    sharpness statistic (Default: [0.2,1.0]). Change this default only if the
    stars have significantly larger or smaller concentration than a
    Gaussian"[1]
    @type sharplim: 2-Element List of Numbers

    @param roundlim: "2 element vector giving low and high cutoff for the
    roundness statistic (Default: [-1.0,1.0]). Change this default only if
    the stars are significantly elongated."[1]
    @type roundlim: 2-Element List of Numbers

    @param window: Rectangular regions of the data to process. If windows are
    passed, detSources will only look at the data within them; if no window
    is set, detSources will look at the entire image. Beware: small objects
    on the edges of the windows may not be detected.
    <pre>
    General Coordinate Form: ( x_offset, y_offset, width, height )

                        __________  (x_offset + width, y_offset + height)
                       |          |
                       |  Window  |
                       |__________|
    (x_offset, y_offset)

    Example:
    Window=[(0,0,200,200)]
        ~~ Looks at a window of size 200, 200 in the bottom left corner
    Window=[(0,0,halfWidth,Height),(halfWidth,0,halfWidth,Height)]
        ~~ Splits the image in 2, divided vertically down the middle.
    </pre>
    @type window: List of 4 dimensional tuples or None

    @param timing: If timing is set to True, the return type for detSources
    will be a tuple of the form (xyArray, overalltime), where overalltime
    represents the time it took detSources to run minus any displaying time.
    This feature is for engineering purposes.
    @type timing: Boolean

    @param grid: If no window is set, detSources will run the image in a
    grid. This is supposed to work in conjunction with rejection.
    @type grid: Boolean

    @param rejection: Rejection functions to be run on each grid point.
    See baseHeuristic() for an example.
    @type rejection: A list of rejection functions or None

    @param ratio: What the ratio or grid size should be. A ratio of 5 means
    the image will be split up into a 5x5 grid. Should be modified to take a
    fixed grid size (50,50), for example.
    @type ratio: int

    @param drawWindows: If this is set to True, will attempt to draw the
    windows using iraf.tvmark(). Beware: a ds9 must be running.
    @type drawWindows: Boolean

    @param dispFrame: This works in conjunction with drawWindows.

    @return: A List of centroids. For example:
    [[  626.66661222,   178.89720247],
     [  718.1319315 ,  2265.69332291],
     [  783.03009601,    13.21621043],
     [ 1161.89652591,  2149.35972066],
     [ 1228.65067586,  1873.15018455],
     [ 1339.96915669,   725.79570466],
     [ 1477.96348539,  1107.85307289],
     [ 1485.17058871,  2059.1712877 ],
     [ 1501.959992  ,   227.32708114],
     [ 2003.10937888,   572.89806682],
     [ 2217.95000197,   763.01713875],
     [ 2407.5780915 ,  2018.30400873]]
    @rtype: 2-D List
    """
    #===========================================================================
    # Parameter Checking
    #===========================================================================
    # image = paramutil.checkParam( image, str, "" )
    if image == "":
        raise ValueError("daoFind requires an image file.")

    imageName, exts = paramutil.checkFileFitExtension(image)
    if verbose:
        print("Opening and Loading: %s[%d]" % (imageName, exts))

    hdu = pf.open(imageName)

    if window is not None:
        if type(window) == tuple:
            window = [window]
        elif type(window) == list:
            pass
        else:
            raise TypeError("'window' must be a tuple of length 4, "
                            "or a list of tuples of length 4.")

        for wind in window:
            if type(wind) == tuple:
                if len(wind) == 4:
                    continue
                else:
                    raise ValueError("A window tuple has incorrect information, "
                                     "%s, require x,y,width,height" % (str(wind)))
            else:
                raise TypeError("The window list contains a non-tuple. %s" % (str(wind)))

    if type(exts) != int and exts is not None:
        raise TypeError("exts must be int or None.")

    # outfile = paramutil.checkParam( outfile, str, "" )
    writeOutFlag = False
    if outfile != "":
        writeOutFlag = True

    # fwhm = paramutil.checkParam( fwhm, type(0.0), 5.5, 0.0 )
    # verbose = paramutil.checkParam( verbose, bool, False )

    if len(sharplim) < 2:
        raise ValueError("Sharplim parameter requires 2 num elements. (i.e. [0.2,1.0])")
    if len(roundlim) < 2:
        raise ValueError("Roundlim parameter requires 2 num elements. (i.e. [-1.0,1.0])")

    if verbose:
        print("Opened and loaded.")
    #------------------------------------------------------------------------------

    #===========================================================================
    # Setup
    #===========================================================================
    ost = time.time()

    maxConvSize = 13   # Maximum size of convolution box in pixels

    radius = maximum(0.637 * fwhm, 2.001)   # Radius is 1.5 sigma
    radiusSQ = radius ** 2
    kernelHalfDimension = minimum(array(radius, copy=0).astype(int32),
                                  (maxConvSize - 1) // 2)
    kernelDimension = 2 * kernelHalfDimension + 1   # Dimension of the kernel or "convolution box"

    sigSQ = (fwhm / 2.35482) ** 2

    # Mask identifies valid pixels in convolution box
    mask = zeros([kernelDimension, kernelDimension], int8)
    # gauss will contain the Gaussian convolution kernel
    gauss = zeros([kernelDimension, kernelDimension], float32)

    row2 = (arange(kernelDimension) - kernelHalfDimension) ** 2

    for i in arange(0, kernelHalfDimension + 1):
        temp = row2 + i ** 2
        gauss[kernelHalfDimension - i] = temp
        gauss[kernelHalfDimension + i] = temp

    # MASK is complementary to SKIP in Stetson's Fortran
    mask = array(gauss <= radiusSQ, copy=0).astype(int32)
    # Values of gauss are now equal to squared distance to the center
    good = where(ravel(mask))[0]

    pixels = good.size

    # Compute quantities for centroid computations that can be used for all stars
    gauss = exp(-0.5 * gauss / sigSQ)

    """
    In fitting Gaussians to the marginal sums, pixels will arbitrarily be
    assigned weights ranging from unity at the corners of the box to
    kernelHalfDimension^2 at the center (e.g. if kernelDimension = 5 or 7,
    the weights will be

        1  2  3  4  3  2  1        1 2 3 2 1
        2  4  6  8  6  4  2        2 4 6 4 2
        3  6  9 12  9  6  3        3 6 9 6 3
        4  8 12 16 12  8  4        2 4 6 4 2
        3  6  9 12  9  6  3        1 2 3 2 1
        2  4  6  8  6  4  2
        1  2  3  4  3  2  1

    respectively). This is done to desensitize the derived parameters to
    possible neighboring, brighter stars.[1]
    """
    xwt = zeros([kernelDimension, kernelDimension], float32)
    wt = kernelHalfDimension - abs(arange(kernelDimension).astype(float32) - kernelHalfDimension) + 1
    for i in arange(0, kernelDimension):
        xwt[i] = wt

    ywt = transpose(xwt)
    sgx = sum(gauss * xwt, 1)
    sumOfWt = sum(wt)

    sgy = sum(gauss * ywt, 0)
    sumgx = sum(wt * sgy)
    sumgy = sum(wt * sgx)
    sumgsqy = sum(wt * sgy * sgy)
    sumgsqx = sum(wt * sgx * sgx)

    vec = kernelHalfDimension - arange(kernelDimension).astype(float32)
    dgdx = sgy * vec
    dgdy = sgx * vec
    sdgdxs = sum(wt * dgdx ** 2)
    sdgdx = sum(wt * dgdx)
    sdgdys = sum(wt * dgdy ** 2)
    sdgdy = sum(wt * dgdy)
    sgdgdx = sum(wt * sgy * dgdx)
    sgdgdy = sum(wt * sgx * dgdy)

    kernel = gauss * mask   # Convolution kernel now in kernel
    sumc = sum(kernel)
    sumcsq = sum(kernel ** 2) - (sumc ** 2 / pixels)
    sumc = sumc / pixels

    # The reason for the flatten is that IDL and numpy treat statements like
    # arr[index], where index is an array, differently. For example, with
    # arr.shape = (100,100) and index=[400], arr[index] would work in IDL;
    # in numpy you need to flatten in order to get the arr[4][0] you want.
    kshape = kernel.shape
    kernel = kernel.flatten()
    kernel[good] = (kernel[good] - sumc) / sumcsq
    kernel.shape = kshape

    # Using row2 here is pretty confusing (from the IDL code)
    # row2 will be something like: [1 2 3 2 1]
    c1 = exp(-.5 * row2 / sigSQ)
    sumc1 = sum(c1) / kernelDimension
    sumc1sq = sum(c1 ** 2) - sumc1
    c1 = (c1 - sumc1) / sumc1sq

    mask[kernelHalfDimension, kernelHalfDimension] = 0   # From now on we exclude the central pixel
    pixels = pixels - 1                                  # so the number of valid pixels is reduced by 1

    # What this operation looks like:
    # ravel(mask) = [0 0 1 1 1 0 0 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 ...]
    # where(ravel(mask)) = (array([ 2,  3,  4,  8,  9, 10, 11, 12, 14, ...]),)
    good = where(ravel(mask))[0]   # "good" identifies position of valid pixels

    # x and y coordinates of valid pixels relative to the center
    xx = (good % kernelDimension) - kernelHalfDimension
    yy = array(good // kernelDimension, copy=0).astype(int32) - kernelHalfDimension

    #------------------------------------------------------------------------------

    #===========================================================================
    # Extension and Window / Grid
    #===========================================================================
    xyArray = []
    outputLines = []

    if exts is None:
        # May want to include astrodata here to deal with
        # all 'SCI' extensions, etc.
        exts = 1

    sciData = hdu[exts].data

    if sigma <= 0.0:
        sigma = background(sciData)
        if verbose:
            print('Estimated Background:', sigma)

    hmin = sigma * threshold

    if window is None:
        # Make the window the entire image
        window = [(0, 0, sciData.shape[1], sciData.shape[0])]

    if grid:
        ySciDim, xSciDim = sciData.shape
        xgridsize = int(xSciDim / ratio)
        ygridsize = int(ySciDim / ratio)
        window = []
        for ypos in range(ratio):
            for xpos in range(ratio):
                window.append((xpos * xgridsize, ypos * ygridsize,
                               xgridsize, ygridsize))

    drawtime = 0
    if drawWindows:
        drawtime = draw_windows(window, dispFrame, label=True)

    if rejection is None:
        rejection = []
    elif rejection == 'default':
        rejection = [baseHeuristic]

    windName = 0
    for wind in window:
        windName += 1
        subXYArray = []
        ##@@TODO check for negative values, check that dimensions don't
        ##       violate overall dimensions.
        yoffset, xoffset, yDimension, xDimension = wind

        if verbose:
            print('x,y,w,h: ', xoffset, yoffset, xDimension, yDimension)
            print('=' * 50)
            print('W' + str(windName))
            print('=' * 50)

        sciSection = sciData[xoffset:xoffset + xDimension,
                             yoffset:yoffset + yDimension]

        #=======================================================================
        # Quickly determine if a window is worth processing
        #=======================================================================
        rejFlag = False
        for rejFunc in rejection:
            if rejFunc(sciSection, sigma, threshold):
                rejFlag = True
                break

        if rejFlag:
            # Reject
            continue

        #------------------------------------------------------------------------------
        #=======================================================================
        # Convolve
        #=======================================================================
        if verbose:
            print("Beginning convolution of image")

        st = time.time()
        h = convolve2d(sciSection, kernel)   # Convolve image with kernel
        et = time.time()
        if verbose:
            print('Convolve Time:', (et - st))

        if not grid:
            h[0:kernelHalfDimension, :] = 0
            h[xDimension - kernelHalfDimension:xDimension, :] = 0
            h[:, 0:kernelHalfDimension] = 0
            h[:, yDimension - kernelHalfDimension:yDimension] = 0

        if verbose:
            print("Finished convolution of image")

        #------------------------------------------------------------------------------
        #=======================================================================
        # Filter
        #=======================================================================
        offset = yy * xDimension + xx

        index = where(ravel(h >= hmin))[0]   # Valid image pixels are greater than hmin
        nfound = index.size

        if nfound > 0:   # Any maxima found?
            h = h.flatten()
            for i in arange(pixels):   # Needs to be changed
                try:
                    stars = where(ravel(h[index] >= h[index + offset[i]]))[0]
                except Exception:
                    break
                nfound = stars.size
                if nfound == 0:   # Do valid local maxima exist?
                    if verbose:
                        print("No objects found.")
                    break
                index = index[stars]

            h.shape = (xDimension, yDimension)
            ix = index % yDimension    # X index of local maxima
            iy = index // yDimension   # Y index of local maxima
            ngood = index.size
        else:
            if verbose:
                print("No objects above hmin (%s) were found." % (str(hmin)))
            continue

        # Loop over star positions; compute statistics
        st = time.time()
        for i in arange(ngood):
            temp = array(sciSection[iy[i] - kernelHalfDimension:(iy[i] + kernelHalfDimension) + 1,
                                    ix[i] - kernelHalfDimension:(ix[i] + kernelHalfDimension) + 1])

            pixIntensity = h[iy[i], ix[i]]   # pixel intensity

            # Compute Sharpness statistic
            #@@FIXME: This should do proper checking...the issue is an out of
            # range index with kernelhalf and temp
            # IndexError: index (3) out of range (0<=index<=0) in dimension 0
            try:
                sharp1 = (temp[kernelHalfDimension, kernelHalfDimension] -
                          (sum(mask * temp)) / pixels) / pixIntensity
            except Exception:
                continue

            if (sharp1 < sharplim[0]) or (sharp1 > sharplim[1]):
                # Reject: not sharp enough
                continue

            dx = sum(sum(temp, 1) * c1)
            dy = sum(sum(temp, 0) * c1)
            if (dx <= 0) or (dy <= 0):
                # Reject
                continue

            around = 2 * (dx - dy) / (dx + dy)   # Roundness statistic

            # Reject if not within specified roundness boundaries.
            if (around < roundlim[0]) or (around > roundlim[1]):
                # Reject
                continue

            """
            Centroid computation: The centroid computation was modified in
            Mar 2008 and now differs from DAOPHOT, which multiplies the
            correction dx by 1/(1+abs(dx)). The DAOPHOT method is more robust
            (e.g. two different sources will not merge), especially in a
            package where the centroid will subsequently be redetermined
            using PSF fitting. However, it is less accurate, and introduces
            biases in the centroid histogram. The change here is the same
            made in the IRAF DAOFIND routine
            (see http://iraf.net/article.php?story=7211&query=daofind ) [1]
            """
            sd = sum(temp * ywt, 0)

            sumgd = sum(wt * sgy * sd)
            sumd = sum(wt * sd)
            sddgdx = sum(wt * sd * dgdx)

            hx = (sumgd - sumgx * sumd / sumOfWt) / (sumgsqy - sumgx ** 2 / sumOfWt)

            # HX is the height of the best-fitting marginal Gaussian. If this
            # is not positive then the centroid does not make sense. [1]
            if (hx <= 0):
                # Reject
                continue

            skylvl = (sumd - hx * sumgx) / sumOfWt
            dx = (sgdgdx - (sddgdx - sdgdx * (hx * sumgx + skylvl * sumOfWt))) / (hx * sdgdxs / sigSQ)

            if abs(dx) >= kernelHalfDimension:
                # Reject
                continue

            xcen = ix[i] + dx   # X centroid in original array

            # Find Y centroid
            sd = sum(temp * xwt, 1)

            sumgd = sum(wt * sgx * sd)
            sumd = sum(wt * sd)
            sddgdy = sum(wt * sd * dgdy)

            hy = (sumgd - sumgy * sumd / sumOfWt) / (sumgsqx - sumgy ** 2 / sumOfWt)
            if (hy <= 0):
                # Reject
                continue

            skylvl = (sumd - hy * sumgy) / sumOfWt
            dy = (sgdgdy - (sddgdy - sdgdy * (hy * sumgy + skylvl * sumOfWt))) / (hy * sdgdys / sigSQ)
            if abs(dy) >= kernelHalfDimension:
                # Reject
                continue

            ycen = iy[i] + dy   # Y centroid in original array

            subXYArray.append([xcen, ycen])

        et = time.time()
        if verbose:
            print('Looping over Stars time:', (et - st))

        subXYArray = averageEachCluster(subXYArray, 10)
        xySize = len(subXYArray)

        for i in range(xySize):
            subXYArray[i] = subXYArray[i].tolist()
            # I have no idea why the positions are slightly modified. Was done
            # originally in iqTool, perhaps for minute correcting.
            subXYArray[i][0] += 1
            subXYArray[i][1] += 1

            subXYArray[i][0] += yoffset
            subXYArray[i][1] += xoffset

            if writeOutFlag:
                outputLines.append(" ".join([str(subXYArray[i][0]),
                                             str(subXYArray[i][1])]) + "\n")

        xyArray.extend(subXYArray)

    oet = time.time()
    overall_time = (oet - ost - drawtime)

    if verbose:
        print('No. of objects detected:', len(xyArray))
        print('Overall time:', overall_time, 'seconds.')

    if writeOutFlag:
        outputFile = open(outfile, "w")
        outputFile.writelines(outputLines)
        outputFile.close()

    if timing:
        return xyArray, overall_time
    else:
        return xyArray
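
# ---------------------------------------------------------------------------
# Usage sketch (illustrative): run detSources on one extension of a FITS file.
# 'N2.fits[1]' follows the image-name format described in the docstring; the
# file name and the 200x200 window are placeholders, not real data.
centroids = detSources('N2.fits[1]',
                       threshold=2.5,               # ~2.5 sigma above background
                       fwhm=5.5,
                       window=[(0, 0, 200, 200)],   # bottom-left corner only
                       verbose=True)
for x, y in centroids:
    print('source at x=%.2f  y=%.2f' % (x, y))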