def loadImage(self, photList, firstSec=0, integrationTime=-1, wvlMin=None, wvlMax=None,
              doStack=False, #expWeightTimeStep=None,
              savePreStackImage=None, doWeighted=True):  #savePreStackImage is temporary for test purposes
    '''
    Build a de-rotated stacked image from a photon list (PhotList) object.
    If the RADecImage instance already contains an image, the new image is
    added to it.

    INPUTS:
        photList - a PhotList object from which to construct the image.
        firstSec - time from start of exposure at which to start the
                    'integration' for the image (seconds).
        integrationTime - duration of integration time to include in the
                    image (in seconds; -1 or NaN => to end of exposure).
        wvlMin, wvlMax - min and max wavelengths of photons to include in
                    the image (Angstroms).
        doStack - boolean; if True, stack the image to be loaded on top of
                    any image data already present.
        #### DEPRECATED - NOW GETS TIME STEPS STRAIGHT FROM CENTROID LIST FILES ####
        expWeightTimeStep - see __init__. If set here, overrides any value
                    already set in the RADecImage object. If the new image is
                    being stacked on top of a current image, a new value can be
                    supplied that is different from the current image's value;
                    but only the last value used (i.e. the one supplied) will be
                    stored in the class attribute.
        ###########################################################################
        savePreStackImage - temporary fudge; set to a file name to save the
                    image out to a file prior to stacking.
        doWeighted - if True, include the flat and flux weighting factors from
                    the photons (i.e. flatfielding and spectral response), and
                    reject photons from pixels where the flatfield is bad at any
                    wavelength within the requested wavelength range (all
                    wavelengths if wvlMin/wvlMax are not specified).
                    ****NOTE - FLUX WEIGHTING NOT FULLY TESTED -- but looks probably okay.****
    '''

    #posErr = 0.8    #Approx. position error in arcsec (just a fixed estimate for now, will improve later)
    #posErr *= 2*np.pi/(60.*60.*360.)   #Convert to radians

    imLoadTic = time.clock()

    photTable = photList.file.root.photons.photons  #Shortcut to table
    #if expWeightTimeStep is not None:
    #    self.expWeightTimeStep = expWeightTimeStep

    if wvlMin is not None and wvlMax is None: wvlMax = np.inf
    if wvlMin is None and wvlMax is not None: wvlMin = 0.0

    #Figure out the last second of the integration
    obsFileExpTime = photList.header.cols.exptime[0]
    if integrationTime == -1 or firstSec + integrationTime > obsFileExpTime:
        lastSec = obsFileExpTime
    else:
        lastSec = firstSec + integrationTime

    #If the virtual coordinate grid is not yet defined, figure it out.
    if self.gridRA is None or self.gridDec is None:
        #Find the RA/dec range needed, taking advantage of the fact that the
        #ra/dec columns are (or should be) indexed....
        print 'Finding RA/dec ranges'
        self.raMin = photTable.cols.ra[photTable.colindexes['ra'][0]]
        self.raMax = photTable.cols.ra[photTable.colindexes['ra'][-1]]
        self.decMin = photTable.cols.dec[photTable.colindexes['dec'][0]]
        self.decMax = photTable.cols.dec[photTable.colindexes['dec'][-1]]
        self.cenRA = (self.raMin + self.raMax) / 2.0
        self.cenDec = (self.decMin + self.decMax) / 2.0
        #Set the size of the virtual grid to accommodate the whole range.
        if self.nPixRA is None:
            #+1 to round up; +1 because the coordinates are the boundaries of
            #the virtual pixels, not the centers.
            self.nPixRA = int((self.raMax - self.raMin) // self.vPlateScale + 2)
        if self.nPixDec is None:
            self.nPixDec = int((self.decMax - self.decMin) // self.vPlateScale + 2)
        self.setCoordGrid()
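    #Worked example of the grid-sizing arithmetic above (illustrative numbers only, not taken
    #from any real observation): if (raMax-raMin) spans 40.3 virtual-pixel widths, then
    #(raMax-raMin)//vPlateScale = 40, and nPixRA = 40 + 2 = 42 -- one extra pixel to round up
    #the truncated division, and one more because gridRA/gridDec store the *boundaries* of the
    #virtual pixels rather than their centers.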
    #Short-hand notation for the number of detector and virtual pixels, just for clarity:
    nDPixRow, nDPixCol = photList.nRow, photList.nCol
    nVPixRA, nVPixDec = self.nPixRA, self.nPixDec

    #Calculate the ratio of virtual pixel area to detector pixel area
    vdPixAreaRatio = (self.vPlateScale / self.detPlateScale) ** 2

    #Make a boolean mask of dead (non-functioning for whatever reason) pixels
    #True (1) = good; False (0) = dead
    #First on the basis of the wavelength cals:
    wvlCalFlagImage = photList.getBadWvlCalFlags()
    deadPixMask = np.where(wvlCalFlagImage == pipelineFlags.waveCal['good'], 1, 0)  #1 where flag is good; 0 otherwise. (A straight boolean mask would work, but is not guaranteed for Python 4....)

    #Next on the basis of the flat cals (or all ones if weighting not requested)
    if doWeighted:
        flatCalFlagArray = photList.file.root.flatcal.flags.read()          #3D array - nRow * nCol * nWavelengthBins.
        flatWvlBinEdges = photList.file.root.flatcal.wavelengthBins.read()  #1D array of wavelength bin edges for the flat cal.
        lowerEdges = flatWvlBinEdges[0:-1]
        upperEdges = flatWvlBinEdges[1:]
        if wvlMin is None and wvlMax is None:
            inRange = np.ones(len(lowerEdges), dtype=bool)  #All bins in range => all True
        else:
            inRange = ((lowerEdges >= wvlMin) & (lowerEdges < wvlMax) |
                       (upperEdges >= wvlMin) & (lowerEdges < wvlMax))
        #Should be zero where any pixel has a bad flag at any wavelength within the
        #requested range; one otherwise. Spot checked, seems to work.
        flatCalMask = np.where(np.all(flatCalFlagArray[:, :, inRange] == False, axis=2), 1, 0)
    else:
        flatCalMask = np.ones((nDPixRow, nDPixCol))

    #If the hot pixel time-mask data is not already parsed in, then parse it.
    if photList.hotPixTimeMask is None:
        photList.parseHotPixTimeMask()  #Loads the time mask dictionary into photList.hotPixTimeMask

    #First find the start/end times of each timestep ('frame') for calculating effective exposure
    #times and for subdividing the image data (the latter is only needed for the purposes of
    #splitting the data into small chunks so it'll fit in memory easily).
    #Use the same timesteps as used in calculating the astrometry.
    tStartFramesAll = np.array(photList.file.root.centroidList.times.read())  #Convert to array, since it's saved as a list.
    tEndFramesAll = np.append(tStartFramesAll[1:], np.inf)  #Last frame goes on forever as far as we know at the moment
    withinIntegration = ((tStartFramesAll < lastSec) & (tEndFramesAll > firstSec))
    tStartFrames = tStartFramesAll[withinIntegration].clip(min=firstSec)  #Now clip so that everything is within the requested integration time.
    tEndFrames = tEndFramesAll[withinIntegration].clip(max=lastSec)
    nFrames = len(tStartFrames)
    assert nFrames > 0  #Otherwise we have a problem....
    assert np.all(tStartFrames <= lastSec) and np.all(tEndFrames >= firstSec)
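    #Illustrative example of the time-step selection and clipping above (hypothetical numbers):
    #if the centroid-list time steps start at [0, 10, 20, 30] s and firstSec=5 with lastSec=25,
    #then withinIntegration keeps the frames starting at 0, 10 and 20, and after clipping,
    #tStartFrames = [5, 10, 20] and tEndFrames = [10, 20, 25] -- so the per-frame effective
    #exposure sums below only ever count time inside the requested integration window.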
    #Get the x,y locations of the detector pixel corners (2D arrays of each x,y value, in detector space)
    dPixXmin = np.indices((nDPixRow, nDPixCol))[1] - 0.5
    dPixXmax = np.indices((nDPixRow, nDPixCol))[1] + 0.5
    dPixYmin = np.indices((nDPixRow, nDPixCol))[0] - 0.5
    dPixYmax = np.indices((nDPixRow, nDPixCol))[0] + 0.5
    #Flattened versions of the same, since getRaDec() only works on flat arrays.
    dPixXminFlat = dPixXmin.flatten()
    dPixXmaxFlat = dPixXmax.flatten()
    dPixYminFlat = dPixYmin.flatten()
    dPixYmaxFlat = dPixYmax.flatten()

    #Create (1D) arrays for the normalised center locations of the virtual pixel grid
    #(=index numbers, representing the locations of unit squares)
    vPixRANormCen = np.arange(nVPixRA)    #np.indices(nVPixDec,nVPixRA)[1]
    vPixDecNormCen = np.arange(nVPixDec)  #np.indices(nVPixDec,nVPixRA)[0]

    #Create 1D arrays marking the edges of the virtual pixels (in 'normalised' space...)
    vPixRANormMin = np.arange(nVPixRA) - 0.5
    vPixRANormMax = np.arange(nVPixRA) + 0.5
    vPixDecNormMin = np.arange(nVPixDec) - 0.5
    vPixDecNormMax = np.arange(nVPixDec) + 0.5

    #Find the origin of the virtual array (center of virtual pixel 0,0) in RA/dec space.
    vPixOriginRA = np.mean(self.gridRA[0:2])
    vPixOriginDec = np.mean(self.gridDec[0:2])
    vPixSize = self.vPlateScale  #Short hand; length of the side of a virtual pixel in radians (assume square pixels)

    #Make arrays to take the total exposure times and image data for each virtual pixel at each time step
    vExpTimesStack = np.zeros((nVPixDec, nVPixRA, nFrames))
    imageStack = np.zeros((nVPixDec, nVPixRA, nFrames))
    #And one for the total exposure time at each pixel summed over all time steps
    vExpTimes = np.zeros((nVPixDec, nVPixRA))

    #Array to hold a list of (equal) timestamps for each pixel at each timestep
    #(just for calculating the RA/dec coordinates of the pixel corners)
    frameTimeFlat = np.zeros((nDPixRow * nDPixCol))  #Also a flat array, for the purposes of getRaDec()
    frameTimeFlat.fill(np.nan)

    #Initialise the RA/dec calculation of pixel locations for exposure time weighting
    raDecCalcObject = crd.CalculateRaDec(photList.file.root.centroidList)

    #------------ Loop through the time steps ----------
    for iFrame in range(nFrames):

        print 'Time slice: ', iFrame + 1, '/', nFrames

        #-------------Make the image for this time step-----------

        #Get the photons
        print 'Getting photon coords'
        print 'wvlMin, wvlMax: ', wvlMin, wvlMax
        tic = time.clock()
        strt, fin = tStartFrames[iFrame], tEndFrames[iFrame]  #Plain variables, just because Numexpr can't handle indexing, it seems
        if wvlMin is None:
            assert wvlMin is None and wvlMax is None
            print '(getting all wavelengths)'
            photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin)')
            #photons = np.array([row.fetch_all_fields() for row in photTable.where('(arrivalTime>=strt) & (arrivalTime<=fin)')])
            #photIndices = photTable.getWhereList('(arrivalTime>=strt) & (arrivalTime<=fin)')
        else:
            assert wvlMin is not None and wvlMax is not None
            print '(trimming wavelength range) '
            photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<=fin) & (wavelength>=wvlMin) & (wavelength<=wvlMax)')
        print 'Time taken (s): ', time.clock() - tic

        #Filter out photons to be masked on the basis of the detector pixel mask
        print 'Finding bad detector pixels...'
        detPixMask = deadPixMask * flatCalMask  #Combine the wave cal pixel mask and the flat cal mask (should be the same in an ideal world, but not necessarily in practice)
        whereBad = np.where(detPixMask == 0)
        badXY = pl.xyPack(whereBad[0], whereBad[1])  #Array of packed x-y values for the bad pixels (CHECK X,Y THE RIGHT WAY ROUND!)
        allPhotXY = photons['xyPix']  #Array of packed x-y values for all photons
        #Get a boolean array indicating photons whose packed x-y coordinate value is in the 'bad' list.
        toReject = np.where(np.in1d(allPhotXY, badXY))[0]  #[0] to take the index array out of the returned 1-element tuple.
        #Chuck out the bad photons
        print 'Rejecting photons from bad pixels...'
        photons = np.delete(photons, toReject)
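        #Sketch of the rejection step above with toy values (not real packed pixel codes):
        #np.in1d compares each photon's packed x-y value against the packed values of the
        #bad pixels, e.g. np.in1d([3, 7, 7, 9], [7]) gives [False, True, True, False], so
        #np.where(...)[0] returns [1, 2] and np.delete drops exactly those two photons.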
        #Pull out the needed information
        print 'Pulling out relevant columns'
        photRAs = photons['ra']    #Read all the photon coords into an RA and a dec array.
        photDecs = photons['dec']
        photHAs = photons['ha']    #Along with the hour angles...
        #********EXPERIMENTING WITH ADDING FLUX WEIGHT - NOT FULLY TESTED, BUT SEEMS OKAY....********
        photWeights = photons['flatWeight'] * photons['fluxWeight']
        print 'INCLUDING FLUX WEIGHTS!'
        photWavelengths = photons['wavelength']
        if savePreStackImage is not None:
            photXs = photons['xPix']  #Keep the detector coordinates for the diagnostic image below
            photYs = photons['yPix']
        del(photons)  #Not needed until the next iteration, and it takes up a lot of memory....

        if wvlMin is not None or wvlMax is not None:
            assert all(photWavelengths >= wvlMin) and all(photWavelengths <= wvlMax)
        print 'Min, max photon wavelengths found: ', np.min(photWavelengths), np.max(photWavelengths)
        nPhot = len(photRAs)

        #Add a uniform random dither to each photon, distributed over a square
        #area of the same size and orientation as the originating pixel at
        #the time of observation.
        xRand = np.random.rand(nPhot) * self.detPlateScale - self.detPlateScale / 2.0
        yRand = np.random.rand(nPhot) * self.detPlateScale - self.detPlateScale / 2.0  #Not the same array!
        ditherRAs = xRand * np.cos(photHAs) - yRand * np.sin(photHAs)
        ditherDecs = yRand * np.cos(photHAs) + xRand * np.sin(photHAs)
        photRAs = photRAs + ditherRAs
        photDecs = photDecs + ditherDecs

        #Make the image for this time slice
        if doWeighted:
            print 'Making weighted image'
            imageStack[:, :, iFrame], thisGridDec, thisGridRA = np.histogram2d(photDecs, photRAs,
                                                                               [self.gridDec, self.gridRA],
                                                                               weights=photWeights)
        else:
            print 'Making unweighted image'
            imageStack[:, :, iFrame], thisGridDec, thisGridRA = np.histogram2d(photDecs, photRAs,
                                                                               [self.gridDec, self.gridRA])

        if savePreStackImage is not None:
            saveName = 'det' + str(strt) + '-' + str(fin) + 's-' + savePreStackImage
            print 'Making det-frame image for diagnostics: ' + saveName
            detImSlice = np.histogram2d(photYs, photXs)[0]
            mpl.imsave(fname=saveName, arr=detImSlice, origin='lower',
                       cmap=mpl.cm.gray, vmin=np.percentile(detImSlice, 0.5),
                       vmax=np.percentile(detImSlice, 99.5))

        #----------Now start figuring out the effective exposure times for each virtual pixel----------------
        #And start figuring out the exposure time weights....
        print 'Calculating effective exposure times'

        #Calculate the detector pixel corner locations in RA/dec space
        #(needs to be clockwise in RA/dec space! (checked, gives +ve answers)).
        frameTimeFlat.fill(tStartFrames[iFrame])
        dPixRA1, dPixDec1, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXminFlat, dPixYminFlat)  #dPix* should all be flat
        dPixRA2, dPixDec2, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXminFlat, dPixYmaxFlat)
        dPixRA3, dPixDec3, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXmaxFlat, dPixYmaxFlat)
        dPixRA4, dPixDec4, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXmaxFlat, dPixYminFlat)

        #Normalise to a scale where the virtual pixel size is 1 and the origin is the origin of the virtual pixel grid
        dPixNormRA1 = (dPixRA1 - vPixOriginRA) / vPixSize  #dPixNorm* should all be flat.
        dPixNormRA2 = (dPixRA2 - vPixOriginRA) / vPixSize
        dPixNormRA3 = (dPixRA3 - vPixOriginRA) / vPixSize
        dPixNormRA4 = (dPixRA4 - vPixOriginRA) / vPixSize
        dPixNormDec1 = (dPixDec1 - vPixOriginDec) / vPixSize
        dPixNormDec2 = (dPixDec2 - vPixOriginDec) / vPixSize
        dPixNormDec3 = (dPixDec3 - vPixOriginDec) / vPixSize
        dPixNormDec4 = (dPixDec4 - vPixOriginDec) / vPixSize

        #Get the min and max RA/decs for each of the detector pixels
        dPixCornersRA = np.array([dPixNormRA1, dPixNormRA2, dPixNormRA3, dPixNormRA4])  #2D array, 4 by nRow*nCol - should be clockwise, I think!
        dPixCornersDec = np.array([dPixNormDec1, dPixNormDec2, dPixNormDec3, dPixNormDec4])
        #dPixCornersRA = np.array([dPixNormRA4,dPixNormRA3,dPixNormRA2,dPixNormRA1])    #2D array, 4 by nRow*nCol - reversed, but gives -ve results, so prob. anti-clockwise....
        #dPixCornersDec = np.array([dPixNormDec4,dPixNormDec3,dPixNormDec2,dPixNormDec1])
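        #Worked example of the normalisation above (illustrative numbers only): a detector
        #pixel corner lying 2.3 virtual-pixel widths east of vPixOriginRA gets
        #dPixNormRA = 2.3, which falls inside virtual pixel 2 in RA, since virtual pixel i
        #spans the normalised interval [i-0.5, i+0.5] by construction of the
        #vPixRANormMin/vPixRANormMax arrays defined earlier.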
        dPixRANormMin = dPixCornersRA.min(axis=0)  #Flat 1D array, nRow * nCol
        dPixRANormMax = dPixCornersRA.max(axis=0)
        dPixDecNormMin = dPixCornersDec.min(axis=0)
        dPixDecNormMax = dPixCornersDec.max(axis=0)

        #Get the array of effective exposure times for each detector pixel based on the hot pixel time mask.
        #Multiply by the bad pixel mask and the flatcal mask so that non-functioning pixels have zero exposure time.
        #Flatten the array in the same way as the previous arrays (1D array, nRow*nCol elements).
        detExpTimes = (hp.getEffIntTimeImage(photList.hotPixTimeMask,
                                             integrationTime=tEndFrames[iFrame] - tStartFrames[iFrame],
                                             firstSec=tStartFrames[iFrame]) * detPixMask).flatten()

        #Loop over the detector pixels and accumulate the exposure time that falls in each
        #tic = time.clock()
        for iDPix in np.arange(nDPixRow * nDPixCol):
            #Find the pixels which are likely to be overlapping (note - could do this as a sorted search to make things faster)
            maybeOverlappingRA = np.where((dPixRANormMax[iDPix] > vPixRANormMin) &
                                          (dPixRANormMin[iDPix] < vPixRANormMax))[0]
            maybeOverlappingDec = np.where((dPixDecNormMax[iDPix] > vPixDecNormMin) &
                                           (dPixDecNormMin[iDPix] < vPixDecNormMax))[0]

            for overlapLocRA in maybeOverlappingRA:
                for overlapLocDec in maybeOverlappingDec:
                    overlapFrac = boxer.boxer(overlapLocDec, overlapLocRA,
                                              dPixCornersDec[:, iDPix], dPixCornersRA[:, iDPix])
                    expTimeToAdd = overlapFrac * detExpTimes[iDPix]
                    vExpTimesStack[overlapLocDec, overlapLocRA, iFrame] += expTimeToAdd

        #print 'Time taken (s): ', time.clock() - tic

    #------------ End loop through the time steps ----------

    #Sum up the exposure times from each frame:
    vExpTimes = np.sum(vExpTimesStack, axis=2)
    thisImage = np.sum(imageStack, axis=2)

    #Check that wherever the exposure time is zero, there are no photons that have not been rejected
    #assert np.all(thisImage[vExpTimes==0] == 0)
    #assert 1==0

    #Temporary for testing-------------
    if savePreStackImage is not None:
        print 'Saving pre-stacked image to ' + savePreStackImage
        mpl.imsave(fname=savePreStackImage, arr=thisImage, origin='lower', cmap=mpl.cm.gray,
                   vmin=np.percentile(thisImage, 0.5), vmax=np.percentile(thisImage, 99.5))
    #---------------------------------

    if self.imageIsLoaded is False or doStack is False:
        self.image = thisImage  #For now, let's keep it this way.... Since weighting does odd things.
        self.effIntTimes = vExpTimes
        self.totExpTime = lastSec - firstSec
        self.expTimeWeights = self.totExpTime / self.effIntTimes
        self.vExpTimesStack = vExpTimesStack  #TEMPORARY FOR DEBUGGING PURPOSES
        self.imageIsLoaded = True
    else:
        assert self.imageIsLoaded == True
        print 'Stacking'
        self.image += thisImage
        self.effIntTimes += vExpTimes
        self.totExpTime += lastSec - firstSec
        self.expTimeWeights = self.totExpTime / self.effIntTimes

    print 'Image load done. Time taken (s): ', time.clock() - imLoadTic
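#Hedged usage sketch for loadImage (kept commented out; the RADecImage constructor arguments,
#the pl.PhotList class and the file names below are assumptions for illustration only, not
#guaranteed by this module):
#
#    virtualImage = RADecImage(nPixRA=250, nPixDec=250)       #Hypothetical constructor call
#    phList = pl.PhotList('photons_20121209-044636.h5')       #Hypothetical photon-list file
#    virtualImage.loadImage(phList, firstSec=0, integrationTime=-1,
#                           wvlMin=4000, wvlMax=11000, doWeighted=True)
#    virtualImage.loadImage(anotherPhList, doStack=True)       #Stack a second (hypothetical) exposure on top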
def hotPixelsTest3():
    '''
    Check that the effective exposure times returned by getPixelCount and
    getPixelCountImage are correct, by comparing the returned values with
    estimates made by counting the number of non-zero bins in a histogram
    of photon arrival times.

    Jun 21 2013 - Now also incorporates a test of the new
    hotpixels.getEffIntTimeImage() function.
    '''

    dir = '/Users/vaneyken/Data/UCSB/ARCONS/Palomar2012/hotPixTest2/'
    obsFileName = 'obs_20121209-044636.h5'
    wvlCalFileName = 'calsol_20121209-060704.h5'
    flatCalFileName = 'flatsol_20121210.h5'
    hotPixFileName = 'hotPix_20121209-044636.h5'
    startTime = 10
    integrationTime = 13  #Note - for now, don't set to -1, as it won't work with getEffIntTimeImage()
    timeBinSize = 0.25    #Time bin size for histogramming up the photon times for a pixel, to check that the calculated effective exposure times are correct (to within ~ a bin size)
    testPixRow = 0        #Just prints out some stats on this particular pixel location for sanity checking
    testPixCol = 14

    obsFile = of.ObsFile(dir + obsFileName)
    obsFile.loadWvlCalFile(dir + wvlCalFileName)
    obsFile.loadFlatCalFile(dir + flatCalFileName)
    print 'Loading hot pixel file into obsFile...'
    obsFile.loadHotPixCalFile(dir + hotPixFileName)
    obsFile.setWvlCutoffs()

    #Test one pixel
    badInt = obsFile.getPixelBadTimes(testPixRow, testPixCol)
    gpc = obsFile.getPixelCount(testPixRow, testPixCol, firstSec=startTime,
                                integrationTime=integrationTime,
                                weighted=False, fluxWeighted=False, getRawCount=True)

    print
    print 'Start time, integration time:', startTime, integrationTime
    print 'Bad interval: ', badInt
    print 'getPixelCount: ', gpc
    print

    #Test an image (approximately)
    print 'Getting image...'
    im = obsFile.getPixelCountImage(startTime, integrationTime)

    print 'Testing output against getEffIntTimeImage() function'
    effIntTimeImage = hp.getEffIntTimeImage(obsFile.hotPixTimeMask,
                                            integrationTime=integrationTime,
                                            firstSec=startTime)
    assert np.all(effIntTimeImage == im['effIntTimes'])
    print 'Output matches.'
    print
    print 'Comparing effective integration times against photon timestamps...'
    print
    print 'iRow, iCol, getPixelCountImage eff. int time, estimated actual eff. int time (approx):'
    for iRow in range(np.shape(im['image'])[0]):
        for iCol in range(np.shape(im['image'])[1]):
            x = obsFile.getTimedPacketList(iRow, iCol, startTime, integrationTime)
            timestamps, tplEffIntTime = x['timestamps'], x['effIntTime']
            if im['image'][iRow, iCol] == 0:
                continue  #If the pixel is dead (no photons) the following tests won't work anyway.
            #Make a histogram of photon arrival times so we can add up the total
            #time where photons were detected and compare with the expected
            #effective integration time. Should be about the same!
            hist, binEdges = np.histogram(timestamps, bins=int(integrationTime / timeBinSize),
                                          range=(startTime, startTime + integrationTime))
            n = sum(hist > 0)  #Number of time bins which contain photons
            approxEffIntTime = n * timeBinSize
            calculatedEffIntTime = im['effIntTimes'][iRow, iCol]
            print iRow, iCol, calculatedEffIntTime, approxEffIntTime
            #Should be about equal to the effective integration time returned by
            #getPixelCountImage (within errors ~ the time bin size).
            if np.median(hist) > 7:
                assert abs(approxEffIntTime - calculatedEffIntTime) < 2 * timeBinSize
            else:
                print 'Photon counts too low to get good statistics'
            #And darned well should equal the effIntTime returned by
            #getTimedPacketList, since getPixelCountImage calls the same routine....
            assert calculatedEffIntTime == tplEffIntTime
    print 'Done. All seems in good order.'
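#Worked example of the arrival-time-histogram check in hotPixelsTest3 (illustrative numbers
#only): with integrationTime=13 s and timeBinSize=0.25 s there are 52 time bins; if photons
#land in 40 of them, approxEffIntTime = 40*0.25 = 10 s, which the test then requires to agree
#with the getPixelCountImage effective integration time to within 2*timeBinSize = 0.5 s
#(provided the median bin count exceeds 7, i.e. the statistics are good enough).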