def testGetImageDet(
    fileName=FileName(run="PAL2012", date="20121208", tstamp="20121209-120530").photonList(),
    firstSec=0,
    integrationTime=-1,
    newMethod=True,
    doWeighted=False,
):

    plFile = photlist.PhotList(fileName)

    try:
        tic = time.time()
        image = plFile.getImageDet(
            firstSec=firstSec,
            integrationTime=integrationTime,
            newMethod=newMethod,
            wvlMin=4000,
            wvlMax=6000,
            doWeighted=doWeighted,
        )
        tElapsed = time.time() - tic
    finally:
        # plFile.close()
        print "Deleting file instance"
        del plFile
    plotArray(image)
    print "Done, time taken (s): ", tElapsed
    return image
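
#Illustrative usage sketch (assumes the default photon-list file named above exists
#on disk; the parameter values here are arbitrary):
#
#   img = testGetImageDet(firstSec=0, integrationTime=10, doWeighted=True)
#   print 'Total counts in 4000-6000 Ang image: ', np.nansum(img)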
    def display(self,normMin=None,normMax=None,expWeight=True,pclip=None,colormap=mpl.cm.gnuplot2,
                image=None, logScale=False):
        '''
        Display the current image. Currently just a short-cut to utils.plotArray,
        but needs updating to mark RA and Dec on the axes.
        '''
        if expWeight:
            toDisplay = np.copy(self.image*self.expTimeWeights)
        else:
            toDisplay = np.copy(self.image)
        
        if logScale is True: toDisplay = np.log10(toDisplay)
        
        if image is not None: toDisplay = image
        
        if pclip:
            normMin = np.percentile(toDisplay[np.isfinite(toDisplay)],q=pclip)
            normMax = np.percentile(toDisplay[np.isfinite(toDisplay)],q=100.0-pclip)

        #Display NaNs as zeros so it looks better
        toDisplay[np.isnan(toDisplay)] = 0
        
        #Find the coordinates of the centers of the virtual pixels in degrees
        #raMin = (self.gridRA[0:-1] + self.gridRA[1:])/2.0 / np.pi * 180.
        #dec = (self.gridDec[0:-1] + self.gridDec[1:])/2.0 / np.pi * 180.
        
        utils.plotArray(toDisplay,cbar=True,normMin=normMin,normMax=normMax,colormap=colormap)
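
        #Illustrative usage sketch, assuming 'vim' is an RADecImage instance that has
        #already had at least one photon list loaded into it (see makeImageStack below):
        #
        #   vim.display(expWeight=True, pclip=0.5, logScale=False, colormap=mpl.cm.gray)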
 def testPlotArray(self):
     "exercise the plotArray function and make the file testPlotArray.png"
     xyarray = np.arange(20).reshape((4,5)) - 5
     fn1 = inspect.stack()[0][3]+".png"
     utils.plotArray(xyarray, showMe=False, cbar=True,
                     cbarticks=[-4, 1,2,4,8,16],
                     cbarlabels=['negative four', 'one','two','four','eight','sixteen'],
                     plotTitle='This is the Plot Title!',
                     colormap=mpl.cm.terrain,
                     pixelsToMark=[(0,1)],
                     pixelMarkColor='red',
                     plotFileName=fn1,
                     sigma=2.0)
 def displayImageDet(self, firstSec=0,integrationTime=-1,wvlMin=-np.Inf,
              wvlMax=np.Inf, normMin=None, normMax=None, showHotPix=False):
     '''
     Display an image built from the photon list in detector coordinate space
     '''
     im = self.getImageDet(firstSec=firstSec, integrationTime=integrationTime, wvlMin=wvlMin, wvlMax=wvlMax)
     utils.plotArray(im, normMin=normMin, normMax=normMax, cbar=True, fignum=None)
     if showHotPix is True:
         if self.hotPixTimeMask is None:
             self.parseHotPixTimeMask()
         badPix = hp.getHotPixels(self.hotPixTimeMask, firstSec=firstSec, integrationTime=integrationTime)
         x = np.arange(self.nCol)
         y = np.arange(self.nRow)
         xx, yy = np.meshgrid(x, y)
         if np.sum(badPix) > 0: mpl.scatter(xx[badPix], yy[badPix], c='y')
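
      #Illustrative usage sketch, assuming 'plFile' is an instance of the class this
      #method belongs to, with a hot pixel time mask available:
      #
      #   plFile.displayImageDet(firstSec=0, integrationTime=30,
      #                          wvlMin=4000, wvlMax=6000, showHotPix=True)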
def displayCentroidResult(obsFileIn, time):
    '''
    To show an image with the location of the centroid
    measured by CentroidCalc marked on top. Use for diagnostic purposes.
    
    INPUTS:
        obsFileIn - either an ObsFile instance or the filename of an 
                  obsFile to load. If a filename, the file will be closed
                  on completion; if an ObsFile instance, it'll be left 
                  alone.
        time - time since beginning of file (in seconds) to display the 
                centroid for.
    
    OUTPUTS:
        A reconstructed image with the calculated centroid plotted on top.
        The image will be integrated over whatever time slice was used
        by CentroidCalc for calculating the centroid at the given input time.
    '''
    
    if isinstance(obsFileIn, str):
        obsFile = ObsFile.ObsFile(obsFileIn)
    else:
        obsFile = obsFileIn
    
    ctrdFileName = FileName.FileName(obsFile=obsFile).centroidList()
    ctrdFile = tables.openFile(ctrdFileName, mode='r')

    #Get the boundaries of the time slices used for centroiding
    #(in seconds from the start of the obs file).
    sliceTimes = ctrdFile.root.centroidlist.times.read()
    xPositions = ctrdFile.root.centroidlist.xPositions.read()
    yPositions = ctrdFile.root.centroidlist.yPositions.read()
    iSliceEnd = np.searchsorted(sliceTimes, time)
    iSliceStart = iSliceEnd-1
    sliceStartTime = sliceTimes[iSliceStart]
    sliceEndTime = sliceTimes[iSliceEnd]
    sliceXpos,sliceYpos = xPositions[iSliceStart],yPositions[iSliceStart]
    
    #Integrate to get the corresponding image (getPixelCountImage returns a dictionary)
    im = obsFile.getPixelCountImage(sliceStartTime, sliceEndTime-sliceStartTime,
                                    getRawCount=True)['image']
    
    #And display the result....
    utils.plotArray(im, sigma=3, cbar=True, plotTitle=os.path.basename(ctrdFileName)+', '
                    +str(sliceStartTime)+' - '+ str(sliceEndTime)+'sec', 
                    pixelsToMark=[(sliceXpos, sliceYpos)], fignum=None)

    ctrdFile.close()
    if isinstance(obsFileIn, str):
        del obsFile     #Per the docstring, only clean up the obs file if we opened it here.
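
#Illustrative usage sketch (the obs file name below is a placeholder following the
#naming convention used elsewhere in this file):
#
#   displayCentroidResult('obs_20121209-120530.h5', time=30.0)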
    
    
def testGetPixelCountImage(bins=250, integrationTime=30):
    '''
    Do two runs of getPixelCountImage and compare the results
    to check the repeatability (i.e., test the degree of 
    effect of the random dithering in the wavelength handling.)
    
    INPUTS:
        bins - set the number of bins for the output histogram
        integrationTime - integration time for each of the two images (sec)
        
    OUTPUTS:
        Displays the two images in ds9, as well as image1 divided
        by image2. Also shows the latter in a regular plot, as well
        as a histogram of the image1/image2 ratios over all pixels.
    '''
    
    obsfile = loadTestObsFile.loadTestObsFile()
    obsfile.setWvlCutoffs(4000,8000)
    
    #Get first image
    im1 = obsfile.getPixelCountImage(firstSec=0, integrationTime=integrationTime, weighted=True,
                                     fluxWeighted=False, getRawCount=False, 
                                     scaleByEffInt=False)['image']
    
    #Get supposedly identical image
    im2 = obsfile.getPixelCountImage(firstSec=0, integrationTime=integrationTime, weighted=True,
                                     fluxWeighted=False, getRawCount=False,
                                     scaleByEffInt=False)['image']
    
    utils.ds9Array(im1,frame=1)
    utils.ds9Array(im2,frame=2)
    
    divim = im1/im2
    
    utils.ds9Array(divim,frame=3)
    utils.plotArray(divim, colormap=pl.cm.hot, cbar=True, normMax=np.mean(divim)+2*np.std(divim))
    
    toHist = divim.flatten()
    toHist = toHist[np.isfinite(toHist)]
    pl.figure()
    pl.hist(toHist,bins=bins)
    pl.title('Ratio of image1/image2, wavelength range '+str(obsfile.wvlLowerLimit)
             +'-'+str(obsfile.wvlUpperLimit)+'Ang')
    pl.xlabel('Ratio')
    pl.ylabel('Number of pixels')
    
    print 'Mean image1/image2: ',np.mean(toHist)
    print 'Std. dev image1/image2: ',np.std(toHist)
    
def makeImageStack(fileNames='photons_*.h5', dir=os.getenv('MKID_PROC_PATH', 
                   default="/Scratch")+'/photonLists/20121211',
                   detImage=False, saveFileName='stackedImage.pkl', wvlMin=3500,
                   wvlMax=12000, doWeighted=True, medCombine=False, vPlateScale=0.2,
                   nPixRA=250,nPixDec=250,maxBadPixTimeFrac=0.2,integrationTime=-1,
                   outputdir=''):
    '''
    Create an image stack
    INPUTS:
        fileNames - string, list of photon-list .h5 files. Can either
                    use wildcards (e.g. 'mydirectory/*.h5') or if string
                    starts with an @, supply a text file which contains
                    a list of file names to stack. (e.g.,
                    'mydirectory/@myfilelist.txt', where myfilelist.txt 
                    is a simple text file with one file name per line.)
        dir - to provide name of a directory in which to find the files
        detImage - if True, show the images in detector x,y coordinates instead
                    of transforming to RA/dec space.
        saveFileName - name of output pickle file for saving final resulting object.
        doWeighted - boolean, if True, do the image flatfield weighting.
        medCombine - experimental, if True, do a median combine of the image stack
                     instead of just adding them all.... Prob. should be implemented
                     properly at some point, just a fudge for now.
        vPlateScale - (arcsec/virtual pixel) - to set the plate scale of the virtual
                     pixels in the outputs image.
        nPixRA,nPixDec - size of virtual pixel grid in output image.
        maxBadPixTimeFrac - Maximum fraction of time which a pixel is allowed to be 
                     flagged as bad (e.g., hot) for before it is written off as
                     permanently bad for the duration of a given image load (i.e., a
                     given obs file).
        integrationTime - the integration time to use from each input obs file (from 
                     start of file).
    OUTPUTS:
        Returns a stacked image object, saves the same out to a pickle file, and
        (depending on whether the savePreStackImage option is still set in the code
        below) saves out the individual non-stacked images as it goes. 
    '''
    

    #Get the list of filenames
    if fileNames[0]=='@':
        #(Note, actually untested, but should be more or less right...)
        files=[]
        with open(fileNames[1:]) as f:
            for line in f:
                files.append(os.path.join(dir,line.strip()))
    else:
        files = glob.glob(os.path.join(dir, fileNames))

    #Initialise empty image centered on Crab Pulsar
    virtualImage = rdi.RADecImage(nPixRA=nPixRA,nPixDec=nPixDec,vPlateScale=vPlateScale,
                                  cenRA=1.4596725441339724, cenDec=0.38422539085925933)
    imageStack = []
                                  
    for eachFile in files:
        if os.path.exists(eachFile):
            print 'Loading: ',os.path.basename(eachFile)
            #fullFileName=os.path.join(dir,eachFile)
            phList = pl.PhotList(eachFile)
            baseSaveName,ext=os.path.splitext(os.path.basename(eachFile))
            
            if detImage is True:
                imSaveName=os.path.join(outputdir,baseSaveName+'det.tif')
                im = phList.getImageDet(wvlMin=wvlMin,wvlMax=wvlMax)
                utils.plotArray(im)
                mpl.imsave(fname=imSaveName,arr=im,cmap=mpl.cm.gnuplot2,origin='lower')
                if eachFile==files[0]:
                    virtualImage=im
                else:
                    virtualImage+=im
            else:
                imSaveName=os.path.join(outputdir,baseSaveName+'.tif')
                virtualImage.loadImage(phList,doStack=not medCombine,savePreStackImage=imSaveName,
                                       wvlMin=wvlMin, wvlMax=wvlMax, doWeighted=doWeighted,
                                       maxBadPixTimeFrac=maxBadPixTimeFrac, integrationTime=integrationTime)
                imageStack.append(virtualImage.image*virtualImage.expTimeWeights)       #Only makes sense if medCombine==True, otherwise will be ignored
                if medCombine==True:
                    medComImage = scipy.stats.nanmedian(np.array(imageStack), axis=0)
                    toDisplay = np.copy(medComImage)
                    toDisplay[~np.isfinite(toDisplay)] = 0
                    utils.plotArray(toDisplay,pclip=0.1,cbar=True,colormap=mpl.cm.gray)
                else:
                    virtualImage.display(pclip=0.5,colormap=mpl.cm.gray)
                    medComImage = None

            mpl.show() 


        else:
            print "File doesn't exist: ", eachFile
    
    #Save the results.
    #Note, if median combining, 'vim' will only contain one frame. If not, medComImage will be None.
    results = {'vim':virtualImage,'imstack':imageStack,'medim':medComImage}

    try:
        output = open(os.path.join(outputdir,saveFileName),'wb')
        pickle.dump(results,output,-1)
        output.close()
            
    except:
        warnings.warn('Unable to save results for some reason...')
    
    return results
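
#Illustrative usage sketch for makeImageStack. The directory and the @-list file
#name below are placeholders; the '@' prefix tells the routine to read one photon
#list file name per line from the given text file:
#
#   results = makeImageStack(fileNames='@myPhotonLists.txt',
#                            dir='/Scratch/photonLists/20121211',
#                            wvlMin=4000, wvlMax=11000, doWeighted=True,
#                            medCombine=False, integrationTime=30)
#   results['vim'].display(pclip=0.5)       #Final stacked RADecImage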
ofs.setRm(degPerPix,
          math.degrees(theta),
          raArcsecPerSec,
        )

#
# Make a FITS file of each frame of the cube
for iFrame in range(66):
    frame = ofs.cubes[iFrame]['cube'].sum(axis=2)
    hdu = pyfits.PrimaryHDU(frame)
    fn = "%s-%02d.fit"%(ofs.name,iFrame)
    print "now make fn=",fn
    hdu.writeto(fn)

# Whew!  Now do the coaddition for all of the sequences.
# This uses all wavelengths.  The first improvement will be to
# have it use a subset of the wavelength bins.
mosaic = ofs.makeMosaicImage(range(66))

# Make a simple plot for now.  You can also save data as a FITS file, or combine
# the frames for three different wavelengths into a fabulous color picture!
# But right now, let's just dump out a heat map so we have something to show off.
utils.plotArray(mosaic,cbar=True,plotTitle=ofs.name,showMe=False,plotFileName=ofs.name+"-all.png")

# Write it out as a FITS file, too
hdu = pyfits.PrimaryHDU(mosaic)
fn = "%s-all.fit"%ofs.name
hdu.writeto(fn)
del ofs
        for y in ally:
            if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
                mask[y,x]=0.
    return mask

def gaussian(height, center_x, center_y, width_x, width_y,offset):
    """Returns a gaussian function with the given parameters"""
    width_x = float(width_x)
    width_y = float(width_y)
    return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
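
#Quick illustrative check of the gaussian() factory (parameter values here are
#arbitrary): evaluate a model PSF on a 46x44 detector-sized grid and confirm the
#peak lands at the requested centre.
#
#   psfModel = gaussian(height=100., center_x=22., center_y=23., width_x=2.5,
#                       width_y=2.5, offset=10.)
#   yy, xx = np.indices((46, 44))
#   psfImage = psfModel(xx, yy)
#   print psfImage.max(), psfImage[23, 22]      #Both ~110 (height + offset at the centre)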

stackDict = np.load('nlttImageStackBlue15.npz')
stack = stackDict['stack']
if len(sys.argv) == 1:
    print 'Usage: ',sys.argv[0],' iFrame'
    print """
    set0 Frames 0-179
    """
    exit(1)
iFrame = int(sys.argv[1])
frame = stack[:,:,iFrame]
#    plt.hist(np.ravel(frame),bins=100,range=(0,5000))
#    plt.show()

nanMask = np.isnan(frame)
frame[nanMask] = 0
frame = np.ma.masked_array(frame,mask=nanMask)
utils.plotArray(frame,cbar=True)

    
    nlttTimesByPixels.append(timesInPixel)
    img[y,x] = sum(inPixel['FlatWeight'])
    rawImg[y,x] = len(inPixel)

nlttPSF = np.concatenate(nlttPSFByPixels)
nlttPSFTimes = np.concatenate(nlttTimesByPixels)
secsInDay = 24*60*60.
period = 0.2350606*secsInDay
phases = (nlttPSFTimes % period)/(period)
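
#Worked example of the phase fold above: period = 0.2350606 d ~ 20309 s, so a photon
#arriving at t = 30000 s has phase = (30000 % 20309.24)/20309.24 ~ 0.477, i.e. roughly
#halfway through its second cycle of the assumed period.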
newtype=[('ArrivalTime', '<f8'),('Flag', '|u1'), ('Phase', '<f4'), ('PixelCol', '|u1'), ('PixelRow', '|u1'), ('Wavelength', '<f4'), ('Weight', '<f4')]
nPhotons = len(nlttPSF)
newTable = np.recarray(nPhotons,dtype=newtype)
newTable['ArrivalTime'] = nlttPSFTimes
newTable['Flag'] = nlttPSF['Flag']
newTable['Phase'] = phases



timestream,timeEdges = np.histogram(nlttPSFTimes,weights=nlttPSF['FlatWeight'],bins=300*len(timestampList))
phaseTimestream,phaseTimeEdges = np.histogram(phases,weights=nlttPSF['FlatWeight'],bins=30*len(timestampList))

plt.plot(timeEdges[:-1],timestream)
plt.show()
counts = [len(pixelPhotons) for pixelPhotons in nlttPSFByPixels]
utils.plotArray(img,cbar=True,normMax=800000)
utils.plotArray(rawImg,cbar=True)
print img[31,29]
print rawImg[31,29]
tbl = np.vstack([timeEdges[:-1],timestream])
np.savetxt(out,tbl.T)
def main():

    obsSequence0 = """
    051516
    052520
    """
    obsSequence1 = """
    033323
    041843
    045902
    """

    obsSequence2 = """
    050404
    054424
    """

    obsSequence3 = """
    054926
    062942
    """

    run = "PAL2012"
    obsSequences = [obsSequence1, obsSequence2, obsSequence3]
    wvlCals = ["063518", "063518", "063518"]
    flatCals = ["20121211", "20121211", "20121211"]
    fluxCalDates = ["20121206", "20121206", "20121206"]
    fluxCals = ["20121207-072055", "20121207-072055", "20121207-072055"]

    # Row coordinate of center of crab pulsar for each obsSequence
    centersRow = [29, 29, 10]
    # Col coordinate of center of crab pulsar for each obsSequence
    centersCol = [29, 30, 14]

    obsUtcDate = "20121212"
    obsUtcDates = ["20121212", "20121212", "20121212"]

    obsFileNames = []
    obsFileNameTimestamps = []
    wvlFileNames = []
    flatFileNames = []
    fluxFileNames = []
    timeMaskFileNames = []

    for iSeq in range(len(obsSequences)):
        obsSequence = obsSequences[iSeq]
        obsSequence = obsSequence.strip().split()
        obsFileNameTimestamps.append(obsSequence)
        obsUtcDate = obsUtcDates[iSeq]
        sunsetDate = str(int(obsUtcDate) - 1)
        obsSequence = [obsUtcDates[iSeq] + "-" + ts for ts in obsSequence]
        obsFileNames.append([FileName(run=run, date=sunsetDate, tstamp=ts).obs() for ts in obsSequence])
        timeMaskFileNames.append([FileName(run=run, date=sunsetDate, tstamp=ts).timeMask() for ts in obsSequence])
        wvlCalTstamp = obsUtcDate + "-" + wvlCals[iSeq]
        wvlFileNames.append(FileName(run=run, date=sunsetDate, tstamp=wvlCalTstamp).calSoln())
        fluxFileNames.append(FileName(run=run, date=fluxCalDates[iSeq], tstamp=fluxCals[iSeq]).fluxSoln())
        flatFileNames.append(FileName(run=run, date=flatCals[iSeq], tstamp="").flatSoln())

    for iSeq, obsSequence in enumerate(obsSequences):
        obsSequence = obsSequence.strip().split()
        print obsSequence
        for iOb, obs in enumerate(obsSequence):
            timeMaskFileName = timeMaskFileNames[iSeq][iOb]
            if not os.path.exists(timeMaskFileName):
                print "Running hotpix for ", obs
                hp.findHotPixels(obsFileNames[iSeq][iOb], timeMaskFileName)
                print "Flux file pixel mask saved to %s" % (timeMaskFileName)

    apertureRadius = 4
    obLists = [[ObsFile(fn) for fn in seq] for seq in obsFileNames]
    tstampFormat = "%H:%M:%S"
    # print 'fileName','headerUnix','headerUTC','logUnix','packetReceivedUnixTime'
    for iSeq, obList in enumerate(obLists):
        for iOb, ob in enumerate(obList):
            print ob.fileName
            centerRow = centersRow[iSeq]
            centerCol = centersCol[iSeq]
            circCol, circRow = circ(centerCol, centerRow)
            ob.loadTimeAdjustmentFile(FileName(run="PAL2012").timeAdjustments())
            ob.loadWvlCalFile(wvlFileNames[iSeq])
            ob.loadFlatCalFile(flatFileNames[iSeq])
            ob.loadFluxCalFile(fluxFileNames[iSeq])
            timeMaskFileName = timeMaskFileNames[iSeq][iOb]
            ob.loadHotPixCalFile(timeMaskFileName)
            ob.setWvlCutoffs(None, None)

    for iSeq, obList in enumerate(obLists):
        for iOb, ob in enumerate(obList):
            print ob.fileName

            centerRow = centersRow[iSeq]
            centerCol = centersCol[iSeq]
            circCol, circRow = circ(centerCol, centerRow)
            imgDict = ob.getPixelCountImage()
            img = imgDict["image"]
            utils.plotArray(img, showMe=False)
            aperture = plt.Circle((centerCol, centerRow), apertureRadius, fill=False, color="g")
            aperture2 = plt.Circle((centerCol, centerRow), 2 * apertureRadius, fill=False, color="g")
            plt.gca().add_patch(aperture)
            plt.gca().add_patch(aperture2)
            plt.show()
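
#Standard entry point so the reduction sequence above runs when this script is
#executed directly (assumes main() is intended as the script's entry point):
if __name__ == "__main__":
    main()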
def quantifyBadTime(inputFileName, startTime=0, endTime=-1, 
                    defaultTimeMaskFileName='./testTimeMask.h5',
                    timeStep=1, fwhm=3.0, boxSize=5, nSigmaHot=3.0,
                    nSigmaCold=2.5,maxIter=5,binWidth=3,
                    dispToPickle=False, showHist=False, bkgdPercentile=50,
                    weighted=False,fluxWeighted=False, useRawCounts=True):
    '''
    Function to calculate various metrics for the degree of bad pixel behaviour in a
    raw obs file. Calculates the mean total hot/cold/dead time per good pixel (i.e., per 
    pixel which is not permanently dead, hot, or cold).
    
    Makes a couple of heat maps showing time spent bad in each way for each pixel,
    as well as a histogram of times spent bad for the *temporarily* bad pixels.
    JvE Nov 20 2013.
    
    The parameters for the finding algorithm may need to be tuned a little,
    but the defaults should basically work, in principle. nSigmaHot and nSigmaCold
    are good places to start if things need playing around with.
    
    e.g. call, in principle:
    
        from hotpix import quantifyHotTime as qht
        qht.quantifyBadTime('/Users/vaneyken/Data/UCSB/ARCONS/turkDataCopy/ScienceData/PAL2012/20121208/obs_20121209-120530.h5')
    
    - should be all it needs....
    
    
    INPUTS:
    
        inputFileName - either a raw obs. filename or a time mask file. If the former, 
                        runs a hot pixel search; otherwise uses time mask file instead.
        
        startTime, endTime - start and end times within the obs file to
                        calculate the hot pixels for. (endTime =-1 means to end of file).
        
        defaultTimeMaskFileName - use this filename to output new time mask to
                                (if inputFileName is an obs file)
    
        binWidth - width of time bins for plotting the bad-time histogram (seconds)

        dispToPickle - if not False, saves the data for the histogram plot to a pickle file.
                       If a string, then uses that as the name for the pickle file. Otherwise
                       saves to a default name. Saves a dictionary with four entries, the first
                       three of which are each a flat array of total times spent bad for every
                       pixel (in sec) ('hotTime','coldTime','deadTime'). The fourth, 'duration',
                       is the duration of the input time-mask in seconds.
        
        showHist - if True, plot up a histogram of everything. Currently fails though 
                   if there are no bad-flagged intervals in any of the type categories
                   (or their intersections, hot+cold, cold+dead).
    
    
        Parameters passed on to findHotPixels routine if called (see also documentation
        for that function):
    
        timeStep        #Check for hot pixels every timeStep seconds
    
        fwhm            #Expected full width half max of PSF in pixels. Any pixel
                        #with flux much tighter than this will be flagged as bad.
                        #Larger value => more sensitive hot pixel flagging.
    
        boxSize         #Compare flux in each pixel with median flux in a 
                        #surrounding box of this size on a side.
    
        nSigmaHot       #Require flux in a pixel to be > nSigmaHot std. deviations
                        #above the max expected for a Gaussian PSF in order for it
                        #to be flagged as hot. Larger value => less sensitive flagging.
    
        nSigmaCold      #Require flux to be < nSigmaCold std. deviations below the median 
                        #in a surrounding box in order to be flagged as cold (where std.
                        #deviation is estimated as the square root of the median flux).
    
        maxIter         #Max num. of iterations for the bad pixel detection algorithm.
        
        bkgdPercentile, weighted, fluxWeighted, useRawCounts    - See findHotPixels() in hotpix.hotpixels.py


    OUTPUTS:
        
        A bunch of statistics on the different kinds of bad pixel behaviour, and a 
        'heat' plot showing how much time each pixel was bad in the array, for each
        type of behaviour. In theory it can plot a histogram of everything too, but
        currently it breaks if there are no bad intervals within any of the type
        categories (hot only, hot and cold, cold only, cold and dead, dead only...)
    
    '''
    
    defaultPklFileName = 'badPixTimeHist.pickle'
    
    #Check whether the input file is a time mask or a regular obs file.
    absInputFileName = os.path.abspath(inputFileName)   #To avoid weird issues with the way findHotPixels expands paths....
    hdffile = tables.openFile(absInputFileName)
    inputIsTimeMaskFile = '/timeMasks' in hdffile   
    hdffile.close()
    
    #Decide whether to generate a new time mask file or not
    if inputIsTimeMaskFile:
        print 'Input file is a time mask file'
        timeMaskFileName = absInputFileName
    else:
        print 'Assuming input file is an obs. file'
        timeMaskFileName = os.path.abspath(defaultTimeMaskFileName)
        if os.path.exists(timeMaskFileName):
            response=''
            while response != 'u' and response !='r':
                response = raw_input(timeMaskFileName+' already exists - "u" to use this (default) or "r" to regenerate? ')
                response = response.strip().lower()
                if response == '': response = 'u'
        else:
            response = 'r'         #If the file didn't already exist, pretend the user entered 'r' despite not having been asked.
            
        if response == 'r':
            #Make/regenerate the hot pixel file.
            print 'Making new hot pixel time mask file '+timeMaskFileName
            hp.findHotPixels(inputFileName=absInputFileName,
                             outputFileName=timeMaskFileName,
                             startTime=startTime,
                             endTime=endTime,
                             timeStep=timeStep, fwhm=fwhm,
                             boxSize=boxSize, nSigmaHot=nSigmaHot, nSigmaCold=nSigmaCold,
                             display=True, dispToPickle=dispToPickle,
                             maxIter=maxIter, bkgdPercentile=bkgdPercentile,
                             weighted=weighted, fluxWeighted=fluxWeighted, useRawCounts=useRawCounts)
    
    
    #Read in the time mask file and calculate hot, cold, and 'other' bad time per pixel.
    timeMask = hp.readHotPixels(timeMaskFileName)
    hotTime = np.zeros((timeMask.nRow,timeMask.nCol))
    coldTime = np.zeros((timeMask.nRow,timeMask.nCol))
    deadTime = np.zeros((timeMask.nRow,timeMask.nCol))
    otherTime = np.zeros((timeMask.nRow,timeMask.nCol))
    hotIntervals = np.array([])
    coldIntervals = np.array([])
    deadIntervals = np.array([])
    otherIntervals = np.array([])

    reasonStringMap = timeMask.reasonEnum
    for iRow in range(timeMask.nRow):
        for iCol in range(timeMask.nCol):
            for eachInterval,eachReasonCode in zip(timeMask.intervals[iRow,iCol], timeMask.reasons[iRow,iCol]):
                eachReasonString = reasonStringMap(eachReasonCode)   #Convert integer code to human readable string
                intSize = utils.intervalSize(eachInterval)
                if eachReasonString == 'hot pixel':
                    hotTime[iRow,iCol] += intSize
                    hotIntervals = np.append(hotIntervals, intSize)
                elif eachReasonString == 'cold pixel':
                    coldTime[iRow,iCol] += intSize
                    coldIntervals = np.append(coldIntervals, intSize)
                elif eachReasonString == 'dead pixel':
                    deadTime[iRow,iCol] += intSize
                    deadIntervals = np.append(deadIntervals, intSize)
                else:
                    otherTime[iRow,iCol] += intSize
                    otherIntervals = np.append(otherIntervals, intSize)

    
    if np.size(hotIntervals) == 0: hotIntervals = np.array([-1])
    if np.size(coldIntervals) == 0: coldIntervals = np.array([-1])
    if np.size(deadIntervals) == 0: deadIntervals = np.array([-1])
    if np.size(otherIntervals) == 0: otherIntervals = np.array([-1])
        
    totBadTime = hotTime+coldTime+deadTime+otherTime
    
    maskDuration = timeMask.endTime-timeMask.startTime
    
    #Figure out which pixels are hot, cold, permanently hot, temporarily cold, etc. etc.
    nPix = timeMask.nRow * timeMask.nCol
    hotPix = hotTime > 0.1
    coldPix = coldTime > 0.1
    deadPix = deadTime > 0.1
    otherPix = otherTime > 0.1
    multiBehaviourPix = ( (np.array(hotPix,dtype=int)+np.array(coldPix,dtype=int)
                         +np.array(deadPix,dtype=int)+np.array(otherPix,dtype=int)) > 1)
    #assert np.all(deadTime[deadPix] == maskDuration)      #All dead pixels should be permanently dead....
    
    tol = timeStep/10. #Tolerance for the comparison operators below.
    
    permHotPix = hotTime >= maskDuration-tol
    permColdPix = coldTime >= maskDuration-tol
    permDeadPix = deadTime >= maskDuration-tol
    permOtherPix = otherTime >= maskDuration-tol
    permGoodPix = (hotTime+coldTime+deadTime+otherTime < tol)
    permBadPix = permHotPix | permColdPix | permDeadPix | permOtherPix
    
    tempHotPix = (hotTime < maskDuration-tol) & (hotTime > tol)
    tempColdPix = (coldTime < maskDuration-tol) & (coldTime > tol)
    tempDeadPix = (deadTime < maskDuration-tol) & (deadTime > tol)
    tempOtherPix = (otherTime < maskDuration-tol) & (otherTime > tol)
    tempGoodPix = tempHotPix | tempColdPix | tempDeadPix | tempOtherPix     #Bitwise or should work okay with boolean arrays
    tempBadPix = tempGoodPix        #Just to be explicit about it....
    
    nGoodPix = np.sum(permGoodPix | tempGoodPix)        #A 'good pixel' is either temporarily or permanently good
    
    #assert np.sum(tempDeadPix) == 0             #Shouldn't be any temporarily dead pixels. APPARENTLY THERE ARE....
    assert np.all((tempHotPix & permHotPix)==False)     #A pixel can't be permanently AND temporarily hot
    assert np.all((tempColdPix & permColdPix)==False)   #... etc.
    assert np.all((tempOtherPix & permOtherPix)==False)
    assert np.all((tempDeadPix & permDeadPix)==False)

    
    #Print out the results
    print '----------------------------------------------'
    print
    print '# pixels total: ', nPix
    print 'Mask duration (sec): ', maskDuration
    print
    print '% hot pixels: ', float(np.sum(permHotPix+tempHotPix))/nPix * 100.
    print '% cold pixels: ', float(np.sum(permColdPix+tempColdPix))/nPix * 100.
    print '% dead pixels: ', float(np.sum(permDeadPix+tempDeadPix))/nPix * 100.
    print '% other pixels: ', float(np.sum(permOtherPix+tempOtherPix))/nPix * 100.
    print
    print '% permanently hot pixels: ', float(np.sum(permHotPix))/nPix * 100.
    print '% permanently cold pixels: ', float(np.sum(permColdPix))/nPix * 100.
    print '% permanently dead pixels: ', float(np.sum(permDeadPix))/nPix * 100.
    print '% permanently "other" bad pixels: ', float(np.sum(permOtherPix))/nPix * 100.
    print
    print '% temporarily hot pixels: ', float(np.sum(tempHotPix))/nPix * 100.
    print '% temporarily cold pixels: ', float(np.sum(tempColdPix))/nPix * 100.
    print '% temporarily dead pixels: ', float(np.sum(tempDeadPix))/nPix * 100.
    print '% temporarily "other" bad pixels: ', float(np.sum(tempOtherPix))/nPix * 100.
    print
    print '% pixels showing multiple bad behaviours: ', float(np.sum(multiBehaviourPix))/nPix * 100.
    print
    print '% permanently bad pixels: ', float(np.sum(permBadPix))/nPix * 100.
    print '% temporarily bad pixels: ', float(np.sum(tempGoodPix))/nPix * 100.      #Temp. good == temp. bad!
    print '% permanently good pixels: ', float(np.sum(permGoodPix))/nPix * 100.
    print
    print 'Mean temp. hot pixel time per good pixel: ', np.sum(hotTime[tempHotPix])/nGoodPix
    print 'Mean temp. hot pixel time per temp. hot pixel: ', np.sum(hotTime[tempHotPix])/np.sum(tempHotPix)
    print
    print 'Mean temp. cold pixel time per good pixel: ', np.sum(coldTime[tempColdPix])/nGoodPix
    print 'Mean temp. cold pixel time per temp. cold pixel: ', np.sum(coldTime[tempColdPix])/np.sum(tempColdPix)
    print
    print 'Mean temp. "other" bad pixel time per good pixel: ', np.sum(otherTime[tempOtherPix])/nGoodPix
    print 'Mean temp. "other" bad pixel time per temp. "other" pixel: ', np.sum(otherTime[tempOtherPix])/np.sum(tempOtherPix)
    print
    print '(All times in seconds)'
    print
    print 'Done.'
    print
    if np.sum(tempOtherPix) > 0 or np.sum(permOtherPix) > 0:
        print '--------------------------------------------------------'
        print 'WARNING: Pixels flagged for "other" reasons detected - '
        print 'Histogram plot will not account for these!!'
        print '--------------------------------------------------------'

    #Display contour plots of the array of total bad times for each pixel for each type of behaviour
    utils.plotArray(hotTime, plotTitle='Hot time per pixel (sec)', fignum=None, cbar=True)
    utils.plotArray(coldTime, plotTitle='Cold time per pixel (sec)', fignum=None, cbar=True)
    utils.plotArray(otherTime, plotTitle='Other bad time per pixel (sec)', fignum=None, cbar=True)
    utils.plotArray(deadTime, plotTitle='Dead time per pixel (sec)', fignum=None, cbar=True)
    
    #Make histogram of time spent 'bad' for the temporarily bad pixels.
    #Ignore pixels flagged as bad for 'other' reasons (other than hot/cold/dead),
    #of which there should be none at the moment.
    assert np.all(otherPix == False)
    #Find total bad time for pixels which go only one of hot, cold, or dead
    onlyHotBadTime = totBadTime[hotPix & ~coldPix & ~deadPix]
    onlyColdBadTime = totBadTime[~hotPix & coldPix & ~deadPix]
    onlyDeadBadTime = totBadTime[~hotPix & ~coldPix & deadPix]
    #Find total bad time for pixels which alternate between more than one bad state
    hotAndColdBadTime = totBadTime[hotPix & coldPix & ~deadPix]
    hotAndDeadBadTime = totBadTime[hotPix & ~coldPix & deadPix]
    coldAndDeadBadTime = totBadTime[~hotPix & coldPix & deadPix]
    hotAndColdAndDeadBadTime = totBadTime[hotPix & coldPix & deadPix]
    allGoodBadTime = totBadTime[~hotPix & ~coldPix & ~deadPix]
    assert np.sum(allGoodBadTime) == 0

    if dispToPickle is not False:
        #Save to pickle file to feed into a separate plotting script, primarily for 
        #the pipeline paper.
        if type(dispToPickle) is str:
            pklFileName = dispToPickle
        else:
            pklFileName = defaultPklFileName
        pDict = {'hotTime':hotTime,
                 'coldTime':coldTime,
                 'deadTime':deadTime,
                 'onlyHotBadTime':onlyHotBadTime,
                 'onlyColdBadTime':onlyColdBadTime, 
                 'onlyDeadBadTime':onlyDeadBadTime,
                 'hotAndColdBadTime':hotAndColdBadTime,
                 'hotAndDeadBadTime':hotAndDeadBadTime,
                 'coldAndDeadBadTime':coldAndDeadBadTime,
                 'hotAndColdAndDeadBadTime':hotAndColdAndDeadBadTime,
                 'maskDuration':maskDuration}
        #pDict = {"hotTime":hotTime.ravel(),"coldTime":coldTime.ravel(),"deadTime":deadTime.ravel(),
        #         "duration":maskDuration}
        print 'Saving to file: ',pklFileName
        output = open(pklFileName, 'wb')
        pickle.dump(pDict,output)
        output.close()

    assert np.size(hotTime)==nPix and np.size(coldTime)==nPix and np.size(deadTime)==nPix
    assert (len(onlyHotBadTime)+len(onlyColdBadTime)+len(onlyDeadBadTime)+len(hotAndColdBadTime)
            +len(coldAndDeadBadTime)+len(hotAndDeadBadTime)+len(hotAndColdAndDeadBadTime)
            +len(allGoodBadTime))==nPix
       
    if showHist is True: 
        #Be sure it's okay to leave hot+dead pixels out, and hot+cold+dead pixels.
        #assert len(hotAndDeadBadTime)==0 and len(hotAndColdAndDeadBadTime)==0 
        mpl.figure()
        norm = 1. #1./np.size(hotTime)*100.
        dataList = [onlyHotBadTime,hotAndColdBadTime,onlyColdBadTime,coldAndDeadBadTime,onlyDeadBadTime]
        dataList2 = [x if np.size(x)>0 else np.array([-1.0]) for x in dataList]     #-1 is a dummy value for empty arrays, so that pyplot.hist doesn't barf.
        weights = [np.ones_like(x)*norm if np.size(x)>0 else np.array([0]) for x in dataList]
        mpl.hist(dataList2, range=None, #[-0.1,maskDuration+0.001],         #Eliminate data at 0sec and maskDuration sec. (always good or permanently bad)
                 weights=weights, 
                 label=['Hot only','Hot/cold','Cold only','Cold/dead','Dead only'], #,'Hot/dead','Hot/cold/dead'],
                 color=['red','pink','lightgray','lightblue','blue'], #,'green','black'],
                 bins=maskDuration/binWidth,histtype='stepfilled',stacked=True,log=False)         
        mpl.title('Duration of Bad Pixel Behaviour - '+os.path.basename(inputFileName))
        mpl.xlabel('Total time "bad" (sec)')
        mpl.ylabel('Percentage of pixels')
        mpl.legend()
        
        mpl.figure()
        mpl.hist(hotIntervals, bins=maskDuration/binWidth)
        print 'Median hot interval: ', np.median(hotIntervals)
        mpl.xlabel('Duration of hot intervals')
        mpl.ylabel('Number of intervals')
 
        mpl.figure()
        mpl.hist(coldIntervals, bins=maskDuration/binWidth)
        print 'Median cold interval: ', np.median(coldIntervals)
        mpl.xlabel('Duration of cold intervals')
        mpl.ylabel('Number of intervals')
 
        mpl.figure()
        mpl.hist(deadIntervals, bins=maskDuration/binWidth)
        print 'Median dead interval: ', np.median(deadIntervals)
        mpl.xlabel('Duration of dead intervals')
        mpl.ylabel('Number of intervals')
 
        mpl.figure()
        mpl.hist(otherIntervals, bins=maskDuration/binWidth)
        print 'Median "other" interval: ', np.median(otherIntervals)
        mpl.xlabel('Duration of "other" intervals')
        mpl.ylabel('Number of intervals')
        
        
 
    print 'Mask duration (s): ',maskDuration
    print 'Number of pixels: ',nPix
    print 'Fraction at 0s (hot,cold,dead): ', np.array([np.sum(hotTime<tol),np.sum(coldTime<tol),
                                                        np.sum(deadTime<tol)]) / float(nPix)                            
    print 'Fraction at '+str(maskDuration)+'s (hot,cold,dead): ', np.array([np.sum(hotTime>maskDuration-tol),
                                                                        np.sum(coldTime>maskDuration-tol),
                                                                        np.sum(deadTime>maskDuration-tol)])/float(nPix)
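
#Illustrative usage sketch (the obs file name is a placeholder; see the docstring
#example above for a full path). Running on a raw obs file generates a time mask
#first; running on an existing time mask file skips that step:
#
#   quantifyBadTime('obs_20121209-120530.h5', startTime=0, endTime=-1,
#                   timeStep=1, nSigmaHot=3.0, nSigmaCold=2.5,
#                   showHist=True, dispToPickle='badPixTimeHist.pickle')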
def divideObsFiles(fileName1='/Users/vaneyken/Data/UCSB/ARCONS/turkDataCopy/ScienceData/PAL2012/20121211/flat_20121212-134024.h5',
                   fileName2='/Users/vaneyken/Data/UCSB/ARCONS/turkDataCopy/ScienceData/PAL2012/20121211/flat_20121212-134637.h5',
                   firstSec=0, integrationTime=10., nbins=None):
    
    '''
    Divide the raw image from one obs file by another, and display and return the result.
    This works with raw counts, so the obs file need not be calibrated.
    
    INPUTS:
        fileName1, fileName2 -- names of two obs files to divide by each other, OR two ObsFile
                                objects can be passed directly.
        firstSec - time during the obs files at which to start integration of images (sec)
        integrationTime - time to integrate for to make the images (sec)
        nbins - number of bins for histogram plot (if None, makes a semi-reasonable guess)
        
    OUTPUTS:
        Displays the divided result to the screen and to ds9.
        Returns a tuple:
            divided image, input image 1, input image 2, obsFile 1, obsFile 2,
            bad-pixel flag array (boolean, True where the ratio is >3 sigma from the median)
    '''

    if type(fileName1) is str:
        obsf1 = ObsFile.ObsFile(fileName1)
        fn1 = fileName1
    else:
        obsf1=fileName1       #Otherwise just assume it's an ObsFile instance.
        fn1=obsf1.fileName
        
    if type(fileName2) is str:
        obsf2 = ObsFile.ObsFile(fileName2)
        fn2 = fileName2
    else:
        obsf2=fileName2
        fn2=obsf2.fileName
    
    print 'Reading '+os.path.basename(fn1)
    pci1 = obsf1.getPixelCountImage(firstSec=firstSec,integrationTime=integrationTime,getRawCount=True)
    print 'Reading '+os.path.basename(fn2)
    pci2 = obsf2.getPixelCountImage(firstSec=firstSec,integrationTime=integrationTime,getRawCount=True)
    
    im1 = pci1['image']
    im2 = pci2['image']
    divIm = im1.astype(float)/im2      #Ensure float division (raw count images may be integer arrays)
    
    med = np.median(divIm[~np.isnan(divIm)])
    #Approximate std. dev from median abs. dev. (more robust)
    sdev = astropy.stats.median_absolute_deviation(divIm[~np.isnan(divIm)]) *1.4826 
    badFlag = np.abs(divIm - med) > 3.0*sdev
    
    print 'Displaying'
    toDisplay = np.copy(divIm)
    toDisplay[np.isnan(toDisplay)]=0
    utils.plotArray(toDisplay, cbar=True, normMin=med-4.*sdev, normMax=med+4.*sdev, colormap=mpl.cm.hot, fignum=None)
    yy,xx = np.indices(np.shape(divIm))
    mpl.scatter(xx[badFlag], yy[badFlag], marker='o', c='r')
    utils.ds9Array(divIm)
    mpl.figure()
    if nbins is None: nbins = int(np.sqrt(np.sum(np.isfinite(divIm))))      #Semi-reasonable default bin count
    mpl.hist(divIm[np.isfinite(divIm)].flatten(), bins=nbins)
    
    print 'Median: ',med
    print 'Approx std. dev. (M.A.D * 1.4826): ',sdev
    print 'Done.'
    return divIm,im1,im2,obsf1,obsf2,badFlag
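
#Illustrative usage sketch, dividing the two default flat-field obs files and
#inspecting the flagged outliers (all file names as per the defaults above):
#
#   divIm, im1, im2, obsf1, obsf2, badFlag = divideObsFiles(integrationTime=10.)
#   print 'Number of >3-sigma outlier pixels: ', np.sum(badFlag)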


    
    
def gaussian(height, center_x, center_y, width_x, width_y, offset):
    """Returns a gaussian function with the given parameters"""
    width_x = float(width_x)
    width_y = float(width_y)
    return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset

stackDict = np.load('nlttImageStack.npz')
stack = stackDict['stack']
if len(sys.argv) == 1:
    print 'Usage: ',sys.argv[0],' iFrame'
    print """
    set0 Frames 0-89
    set1 Frames 90-269
    set2 Frames 270-359
    """
    exit(1)
iFrame = int(sys.argv[1])
frame = stack[:,:,iFrame]
#    plt.hist(np.ravel(frame),bins=100,range=(0,5000))
#    plt.show()

nanMask = np.isnan(frame)
frame[nanMask] = 0
#nanMask[frame>500]=1
#nanMask[frame<150]=1
frame = np.ma.masked_array(frame,mask=nanMask)
sky = np.ma.masked_array(frame[0:20,20:40],mask=nanMask[0:20,20:40])
print sky.std(),np.ma.median(sky),np.ma.mean(sky)

utils.plotArray(sky,cbar=True)

    
def makeImageStack(fileNames='photons_*.h5', dir=os.getenv('MKID_PROC_PATH', default="/Scratch")+'/photonLists/20131209',
                   detImage=False, saveFileName='stackedImage.pkl', wvlMin=None,
                   wvlMax=None, doWeighted=True, medCombine=False, vPlateScale=0.2,
                   nPixRA=250,nPixDec=250):
    '''
    Create an image stack
    INPUTS:
        fileNames - string, list of photon-list .h5 files. Can either
                    use wildcards (e.g. 'mydirectory/*.h5') or if string
                    starts with an @, supply a text file which contains
                    a list of file names to stack. (e.g.,
                    'mydirectory/@myfilelist.txt', where myfilelist.txt 
                    is a simple text file with one file name per line.)
        dir - to provide name of a directory in which to find the files
        detImage - if True, show the images in detector x,y coordinates instead
                    of transforming to RA/dec space.
        saveFileName - name of output pickle file for saving final resulting object.
        doWeighted - boolean, if True, do the image flatfield weighting.
        medCombine - experimental, if True, do a median combine of the image stack
                     instead of just adding them all.... Prob. should be implemented
                     properly at some point, just a fudge for now.
        vPlateScale - (arcsec/virtual pixel) - to set the plate scale of the virtual
                     pixels in the outputs image.
        nPixRA,nPixDec - size of virtual pixel grid in output image.
    
    OUTPUTS:
        Returns a stacked image object, saves the same out to a pickle file, and
        (depending on whether the savePreStackImage option is still set in the code
        below) saves out the individual non-stacked images as it goes. 
    '''
    

    #Get the list of filenames
    if fileNames[0]=='@':
        #(Note, actually untested, but should be more or less right...)
        files=[]
        with open(fileNames[1:]) as f:
            for line in f:
                files.append(os.path.join(dir,line.strip()))
    else:
        files = glob.glob(os.path.join(dir, fileNames))

    #Initialise empty image centered on the target coordinates (radians)
    virtualImage = rdi.RADecImage(nPixRA=nPixRA,nPixDec=nPixDec,vPlateScale=vPlateScale,
                                  cenRA=3.20238771, cenDec=0.574944617)
    imageStack = []

                                  
    for eachFile in files:
        if os.path.exists(eachFile):
            print 'Loading: ',os.path.basename(eachFile)
            #fullFileName=os.path.join(dir,eachFile)
            phList = pl.PhotList(eachFile)
            baseSaveName,ext=os.path.splitext(os.path.basename(eachFile))
            
            if detImage is True:
                imSaveName=baseSaveName+'det.tif'
                im = phList.getImageDet(wvlMin=wvlMin,wvlMax=wvlMax)
                utils.plotArray(im)
                mpl.imsave(fname=imSaveName,arr=im,cmap=mpl.cm.gnuplot2,origin='lower')
                if eachFile==files[0]:
                    virtualImage=im
                else:
                    virtualImage+=im
            else:
                imSaveName=baseSaveName+'.tif'
                virtualImage.loadImage(phList,doStack=not medCombine,savePreStackImage=imSaveName,
                                       wvlMin=wvlMin, wvlMax=wvlMax, doWeighted=doWeighted)
                imageStack.append(virtualImage.image*virtualImage.expTimeWeights)       #Only makes sense if medCombine==True, otherwise will be ignored
                if medCombine==True:
                    medComImage = scipy.stats.nanmedian(np.array(imageStack), axis=0)
                    normMin = np.percentile(medComImage[np.isfinite(medComImage)],q=0.1)
                    normMax = np.percentile(medComImage[np.isfinite(medComImage)],q=99.9)
                    toDisplay = np.copy(medComImage)
                    toDisplay[~np.isfinite(toDisplay)] = 0
                    #utils.plotArray(toDisplay,normMin=normMin,normMax=normMax)
                else:
                    #virtualImage.display(pclip=0.1)
                    medComImage = None
                        
        else:
            print "File doesn't exist: ", eachFile
    
    #Save the results
    try:
        output = open(saveFileName,'wb')
        pickle.dump(virtualImage,output,-1)
        output.close()
    except:
        warnings.warn('Unable to save results for some reason...')
    
    return virtualImage, imageStack, medComImage
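
#Illustrative usage sketch for this variant, which pickles only the final
#RADecImage ('stackedImage.pkl' by default). File and directory names are the
#defaults above; reloading is a plain pickle load:
#
#   vim, stack, medIm = makeImageStack(doWeighted=True, medCombine=False)
#
#   import pickle
#   vimReloaded = pickle.load(open('stackedImage.pkl', 'rb'))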