def testBoxer():

    #Corners of a 2x2 square rotated at 45deg and centered at origin:
    rotSqX = np.array([-1.,0.,1.,0.])
    rotSqY = np.array([0.,1.,0.,-1.])
    #Integer values for centers of unit square:
    iList=[0,1,1,0,0,1,1,0]
    jList=[0,0,1,1,0,0,1,1]
    #List of quadrilaterals:
    xList=[rotSqX,rotSqX,rotSqX,rotSqX,
           rotSqX+0.5,rotSqX+0.5,rotSqX+0.5,rotSqX+0.5]
    yList=[rotSqY,rotSqY,rotSqY,rotSqY,
           rotSqY,rotSqY,rotSqY,rotSqY]
    #List of expected overlap areas:
    expectedAreaList=[1.0,0.25,0,0.25,0.75,0.75,0.125,0.125]
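    #Worked example (assuming boxer returns the overlap area between the unit square
    #centered on (i,j) and the supplied quadrilateral): for (i,j)=(1,0) and the
    #unshifted diamond, the overlap is the triangle with vertices (0.5,0.5), (1,0),
    #(0.5,-0.5), i.e. area = 0.5 * base(1.0) * height(0.5) = 0.25.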

    print 'i,j,x,y,calculatedArea,expectedArea'
    for i,j,x,y,expectedArea in zip(iList,jList,xList,yList,expectedAreaList):
        calculatedArea=boxer(i, j, x, y)
        print 'i,j: ',i,j
        print 'x: ',x
        print 'y: ',y
        print 'Calculated area: ',calculatedArea
        print 'Expected area: ',expectedArea
        print
        assert np.abs(calculatedArea-expectedArea) < 1e-7
        

    print 'All good.'
    def loadImage(
        self,
        photList,
        firstSec=0,
        integrationTime=-1,
        wvlMin=None,
        wvlMax=None,
        doStack=False,
        savePreStackImage=None,
        doWeighted=True,
        maxBadPixTimeFrac=0.5,
    ):  # savePreStackImage is sort of temporary for test purposes
        """
        
        Build a de-rotated stacked image from a photon list (PhotList) object.
        If the RADecImage instance already contains an image, the new image is added to it.
        
        INPUTS:
            photList - a PhotList object from which to construct the image.
            
            firstSec - time from start of exposure to start the 'integration' for the image (seconds)
            
            integrationTime - duration of integration time to include in the image (in seconds; -1 or NaN => to end of exposure)
            
            wvlMin, wvlMax - min and max wavelengths of photons to include in the image (Angstroms).
            
            doStack - boolean; if True, then stack the image to be loaded on top of any image data already present.          
            
            savePreStackImage - temporary fudge, set to a file-name to save the image out to a file prior to stacking.
            
            doWeighted - if True, includes flat and flux weighting (i.e. flatfielding and spectral response) factors from photons,
                                and rejects photons from pixels where the flatfield is bad at any wavelength within the requested
                                wavelength range (all wavelengths if wvlMin/wvlMax not specified).
                                ****NOTE - FLUX WEIGHTING NOT FULLY TESTED -- but looks probably okay.****
            
            maxBadPixTimeFrac - Maximum fraction of time for which a pixel is allowed to be flagged as hot (or otherwise bad)
                                before it is written off as bad for the entire duration of the requested integration time.
        """

        # posErr = 0.8    #Approx. position error in arcsec (just a fixed estimate for now, will improve later)
        # posErr *= 2*np.pi/(60.*60.*360.)  #Convert to radians

        tic = time.clock()

        photTable = photList.file.root.photons.photons  # Shortcut to table
        # if expWeightTimeStep is not None:
        #    self.expWeightTimeStep=expWeightTimeStep

        # If hot pixels time-mask data not already parsed in (presumably not), then parse it.
        if photList.hotPixTimeMask is None:
            photList.parseHotPixTimeMask()  # Loads time mask dictionary into photList.hotPixTimeMask

        if wvlMin is not None and wvlMax is None:
            wvlMax = np.inf
        if wvlMin is None and wvlMax is not None:
            wvlMin = 0.0

        # Figure out last second of integration
        obsFileExpTime = photList.header.cols.exptime[0]
        if integrationTime == -1 or firstSec + integrationTime > obsFileExpTime:
            lastSec = obsFileExpTime
        else:
            lastSec = firstSec + integrationTime

        # If virtual coordinate grid is not yet defined, figure it out.
        if self.gridRA is None or self.gridDec is None:
            # Find RA/dec range needed, taking advantage of the fact that the ra/dec columns are (or should be) indexed....
            print "Finding RA/dec ranges"
            self.raMin = photTable.cols.ra[photTable.colindexes["ra"][0]]
            self.raMax = photTable.cols.ra[photTable.colindexes["ra"][-1]]
            self.decMin = photTable.cols.dec[photTable.colindexes["dec"][0]]
            self.decMax = photTable.cols.dec[photTable.colindexes["dec"][-1]]
            self.cenRA = (self.raMin + self.raMax) / 2.0
            self.cenDec = (self.decMin + self.decMax) / 2.0
            # Set size of virtual grid to accommodate.
            if self.nPixRA is None:
                # +1 for round up; +1 because coordinates are the boundaries of the virtual pixels, not the centers.
                self.nPixRA = int((self.raMax - self.raMin) // self.vPlateScale + 2)
            if self.nPixDec is None:
                self.nPixDec = int((self.decMax - self.decMin) // self.vPlateScale + 2)
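            # e.g. if (raMax - raMin) spans 10.3 virtual-pixel widths, 10.3 // 1 + 2 = 12
            # pixels: one extra for the integer round-down, one because the grid
            # coordinates mark pixel boundaries rather than centers.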
            self.setCoordGrid()

        # Short-hand notations for no. of detector and virtual pixels, just for clarity:
        nDPixRow, nDPixCol = photList.nRow, photList.nCol
        nVPixRA, nVPixDec = self.nPixRA, self.nPixDec

        # Calculate ratio of virtual pixel area to detector pixel area
        vdPixAreaRatio = (self.vPlateScale / self.detPlateScale) ** 2

        # Make a boolean mask of dead (non functioning for whatever reason) pixels
        # True (1) = good; False (0) = dead
        # First on the basis of the wavelength cals:
        wvlCalFlagImage = photList.getBadWvlCalFlags()
        deadPixMask = np.where(
            wvlCalFlagImage == pipelineFlags.waveCal["good"], 1, 0
        )  # 1.0 where flag is good; 0.0 otherwise. (Straight boolean mask would work, but not guaranteed for Python 4....)
        print "# Dead detector pixels to reject on basis of wavelength cal: ", np.sum(deadPixMask == 0)

        # Next a mask on the basis of the flat cals (or all ones if weighting not requested)
        if doWeighted:
            flatCalFlagArray = photList.file.root.flatcal.flags.read()  # 3D array - nRow * nCol * nWavelength Bins.
            flatWvlBinEdges = (
                photList.file.root.flatcal.wavelengthBins.read()
            )  # 1D array of wavelength bin edges for the flat cal.
            lowerEdges = flatWvlBinEdges[0:-1]
            upperEdges = flatWvlBinEdges[1:]
            if wvlMin is None and wvlMax is None:
                inRange = np.ones(len(lowerEdges), dtype=bool)  # (all bins in range implies all True)
            else:
                inRange = (lowerEdges >= wvlMin) & (lowerEdges < wvlMax) | (upperEdges >= wvlMin) & (
                    lowerEdges < wvlMax
                )  ####SOMETHING NOT RIGHT HERE? DELETE IF NO ASSERTION ERROR THROWN BELOW!##########
                # Bug fix - I think this is totally equivalent - first term above is redundant, included in second term:
                inRangeOld = np.copy(inRange)  # Can delete if no assertion error thrown below
                inRange = (upperEdges >= wvlMin) & (lowerEdges < wvlMax)
                assert np.all(inRange == inRangeOld)  # Can delete once satisfied this works.
                # If this never complains, then can switch to the second form.
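                # Quick check of the equivalence with a concrete case: bin edges
                # [4000, 5000, 6000] Angstroms and (wvlMin, wvlMax) = (4500, 5500) give
                # inRange = [True, True] under either form -- the [4000,5000) bin
                # qualifies via its upper edge, the [5000,6000) bin via its lower edge.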

            flatCalMask = np.where(
                np.all(flatCalFlagArray[:, :, inRange] == False, axis=2), 1, 0
            )  # Should be zero where any pixel has a bad flag at any wavelength within the requested range; one otherwise. Spot checked, seems to work.
            print "# Detector pixels to reject on basis of flatcals: ", np.sum(flatCalMask == 0)
        else:
            flatCalMask = np.ones((nDPixRow, nDPixCol))

        # And now a mask based on how much hot pixel behaviour each pixel exhibits:
        # if a given pixel is bad more than a fraction maxBadPixTimeFrac of the time,
        # then write it off as permanently bad for the duration of the requested
        # integration.
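        # e.g. with maxBadPixTimeFrac = 0.5 over a 60 s integration, a pixel must have
        # more than 30 s of good (unflagged) time to be kept; otherwise it is zeroed
        # for the whole integration.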
        if maxBadPixTimeFrac is not None:
            print "Rejecting pixels with more than ", 100 * maxBadPixTimeFrac, "% bad-flagged time"
            detGoodIntTimes = photList.hotPixTimeMask.getEffIntTimeImage(
                firstSec=firstSec, integrationTime=lastSec - firstSec
            )
            badPixMask = np.where(
                detGoodIntTimes / (lastSec - firstSec) > (1.0 - maxBadPixTimeFrac), 1, 0
            )  # Again, 1 if okay, 0 bad. Use lastSec-firstSec instead of integrationTime in case integrationTime is -1.
            print "# pixels to reject: ", np.sum(badPixMask == 0)
            print "# pixels to reject with eff. int. time > 0: ", np.sum((badPixMask == 0) & (detGoodIntTimes > 0))
        else:
            badPixMask = np.ones((nDPixRow, nDPixCol))

        # Finally combine all the masks together into one detector pixel mask:
        detPixMask = deadPixMask * flatCalMask * badPixMask  # Combine masks
        print "Total detector pixels to reject: ", np.sum(
            detPixMask
        ), "(may not equal sum of the above since theres overlap!)"

        # Now get the photons
        print "Getting photon coords"
        print "wvlMin, wvlMax: ", wvlMin, wvlMax
        if wvlMin is None:
            assert wvlMin is None and wvlMax is None
            print "(getting all wavelengths)"
            # tic = time.clock()
            photons = photTable.readWhere("(arrivalTime>=firstSec) & (arrivalTime<=lastSec)")
            # print 'v1 time taken (s): ', time.clock()-tic
            # tic = time.clock()
            # photons = np.array([row.fetch_all_fields() for row in photTable.where('(arrivalTime>=firstSec) & (arrivalTime<=lastSec)')])
            # photIndices = photTable.getWhereList('(arrivalTime>=firstSec) & (arrivalTime<=lastSec)')
            # print 'v2 time taken (s): ', time.clock()-tic
            # print 'Doing by second method'
            # tic = time.clock()
            # photons2 = [x for x in photons.iterrows() if (x['arrivalTime']>=firstSec) and (x['arrivalTime']<=lastSec)]
            # print 'Time taken (s): ',time.clock()-tic
        else:
            assert wvlMin is not None and wvlMax is not None
            print "(trimming wavelength range) "
            photons = photTable.readWhere(
                "(arrivalTime>=firstSec) & (arrivalTime<=lastSec) & (wavelength>=wvlMin) & (wavelength<=wvlMax)"
            )

        # And filter out photons to be masked out on the basis of the detector pixel mask
        print "Finding photons in masked detector pixels..."
        whereBad = np.where(detPixMask == 0)
        badXY = pl.xyPack(
            whereBad[0], whereBad[1]
        )  # Array of packed x-y values for bad pixels (CHECK X,Y THE RIGHT WAY ROUND!)
        allPhotXY = photons["xyPix"]  # Array of packed x-y values for all photons
        # Get a boolean array indicating photons whose packed x-y coordinate value is in the 'bad' list.
        toReject = np.where(np.in1d(allPhotXY, badXY))[
            0
        ]  # [0] to take index array out of the returned 1-element tuple.
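        # (xyPack presumably encodes an (x, y) pixel pair as a single integer, so the
        # membership test above reduces to a 1D np.in1d lookup rather than a 2D comparison.)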
        # Chuck out the bad photons
        print "Rejecting photons from bad pixels..."
        photons = np.delete(photons, toReject)
        #########################################################################

        photRAs = photons["ra"]  # Read all photon coords into an RA and a dec array.
        photDecs = photons["dec"]
        photHAs = photons["ha"]  # Along with hour angles...
        photWeights = (
            photons["flatWeight"] * photons["fluxWeight"]
        )  # ********EXPERIMENTING WITH ADDING FLUX WEIGHT - NOT FULLY TESTED, BUT SEEMS OKAY....********
        print "INCLUDING FLUX WEIGHTS!"
        photWavelengths = photons["wavelength"]
        if wvlMin is not None or wvlMax is not None:
            assert all(photWavelengths >= wvlMin) and all(photWavelengths <= wvlMax)
        print "Min, max photon wavelengths found: ", np.min(photWavelengths), np.max(photWavelengths)
        nPhot = len(photRAs)

        # Add uniform random dither to each photon, distributed over a square
        # area of the same size and orientation as the originating pixel at
        # the time of observation (assume RA and dec are defined at center of pixel).
        xRand = np.random.rand(nPhot) * self.detPlateScale - self.detPlateScale / 2.0
        yRand = np.random.rand(nPhot) * self.detPlateScale - self.detPlateScale / 2.0  # Not the same array!
        ditherRAs = xRand * np.cos(photHAs) - yRand * np.sin(photHAs)
        ditherDecs = yRand * np.cos(photHAs) + xRand * np.sin(photHAs)
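        # The dither is rotated by the hour angle so it stays aligned with the detector
        # pixel rather than the sky, i.e.
        #   [ditherRA ]   [ cos(HA)  -sin(HA) ] [xRand]
        #   [ditherDec] = [ sin(HA)   cos(HA) ] [yRand]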

        photRAs = photRAs + ditherRAs
        photDecs = photDecs + ditherDecs

        # Make the image for this integration
        if doWeighted:
            print "Making weighted image"
            thisImage, thisGridDec, thisGridRA = np.histogram2d(
                photDecs, photRAs, [self.gridDec, self.gridRA], weights=photWeights
            )
        else:
            print "Making unweighted image"
            thisImage, thisGridDec, thisGridRA = np.histogram2d(photDecs, photRAs, [self.gridDec, self.gridRA])

        # Save the time slice images in detector coordinates if image saving is requested.
        if savePreStackImage is not None:
            saveName = "det-" + savePreStackImage
            print "Making detector-frame image slice for diagnostics: " + saveName
            detImSlice = np.histogram2d(photons["yPix"], photons["xPix"], bins=[photList.nRow, photList.nCol])[0]
            mpl.imsave(
                fname=saveName,
                arr=detImSlice,
                origin="lower",
                cmap=mpl.cm.gray,
                vmin=np.percentile(detImSlice, 0.5),
                vmax=np.percentile(detImSlice, 99.5),
            )

        # ------------
        # Time masking
        # ------------

        # And start figuring out the exposure time weights....
        print "Calculating effective exposure times"

        # First find start/end times of each timestep ('frame') for calculating effective exp. times
        # Use the same timesteps as used in calculating the astrometry.

        # tStartFrames = np.arange(start=firstSec,stop=lastSec,
        #                         step=self.expWeightTimeStep)
        # tEndFrames = (tStartFrames+self.expWeightTimeStep).clip(max=lastSec)    #Clip so that the last value doesn't go beyond the end of the exposure.
        if "centroidlist" in photList.file.root.centroidList:  # Check HDF tree structure for back compatibility
            tStartFramesAll = np.array(
                photList.file.root.centroidList.centroidlist.times.read()
            )  # Convert to array, since it's saved as a list.
        else:
            tStartFramesAll = np.array(photList.file.root.centroidList.times.read())  # For back compatibility
        tEndFramesAll = np.append(
            tStartFramesAll[1:], np.inf
        )  # Last frame goes on forever as far as we know at the moment
        withinIntegration = (tStartFramesAll < lastSec) & (tEndFramesAll > firstSec)
        tStartFrames = tStartFramesAll[withinIntegration].clip(
            min=firstSec
        )  # Now clip so that everything is within the requested integration time.
        tEndFrames = tEndFramesAll[withinIntegration].clip(max=lastSec)
        nFrames = len(tStartFrames)
        assert nFrames > 0  # Otherwise we have a problem....
        assert np.all(tStartFrames <= lastSec) and np.all(tEndFrames >= firstSec)

        # Get x,y locations of detector pixel corners (2D array of each x,y value, in detector space)
        # Assume definition where integer values represent location of pixel center.
        dPixXmin = np.indices((nDPixRow, nDPixCol))[1] - 0.5
        dPixXmax = np.indices((nDPixRow, nDPixCol))[1] + 0.5
        dPixYmin = np.indices((nDPixRow, nDPixCol))[0] - 0.5
        dPixYmax = np.indices((nDPixRow, nDPixCol))[0] + 0.5
        dPixXminFlat = dPixXmin.flatten()  # Flattened versions of the same since getRaDec() only works on flat arrays.
        dPixXmaxFlat = dPixXmax.flatten()
        dPixYminFlat = dPixYmin.flatten()
        dPixYmaxFlat = dPixYmax.flatten()

        # Create (1D) arrays for normalised center locations of virtual pixel grid (=index numbers, representing location of unit squares)
        vPixRANormCen = np.arange(nVPixRA)  # np.indices(nVPixDec,nVPixRA)[1]
        vPixDecNormCen = np.arange(nVPixDec)  # np.indices(nVPixDec,nVPixRA)[0]

        # Create 1D arrays marking edges of virtual pixels (in 'normalised' space...)
        vPixRANormMin = np.arange(nVPixRA) - 0.5
        vPixRANormMax = np.arange(nVPixRA) + 0.5
        vPixDecNormMin = np.arange(nVPixDec) - 0.5
        vPixDecNormMax = np.arange(nVPixDec) + 0.5

        # Find origin of virtual array (center of virtual pixel 0,0) in RA/dec space.
        vPixOriginRA = np.mean(self.gridRA[0:2])
        vPixOriginDec = np.mean(self.gridDec[0:2])
        vPixSize = self.vPlateScale  # Short hand, Length of side of virtual pixel in radians (assume square pixels)
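        # In this normalised frame a sky coordinate maps to
        #   norm = (coord - vPixOrigin) / vPixSize
        # so virtual pixel k occupies the unit interval [k - 0.5, k + 0.5), which is the
        # convention the boxer overlap calculation below assumes.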

        # Make array to take the total exposure times for each virtual pixel at each time step
        vExpTimesStack = np.zeros((nVPixDec, nVPixRA, nFrames))
        # vExpTimesStack2 = np.zeros((nVPixDec,nVPixRA,nFrames))  #FOR TEST PURPOSES

        # And one for the total exposure time at each pixel summed over all time steps
        vExpTimes = np.zeros((nVPixDec, nVPixRA))

        # Array to hold list of (equal) timestamps for each pixel at each timestep
        # (just for calculating the RA/dec coordinates of the pixel corners)
        frameTimeFlat = np.zeros((nDPixRow * nDPixCol))  # Also flat array for the purposes of getRaDec()
        frameTimeFlat.fill(np.nan)

        # Initialise RA/dec calculations of pixel locations for exposure time weighting
        raDecCalcObject = crd.CalculateRaDec(photList.file.root.centroidList)

        # ------------ Loop through the time steps ----------
        for iFrame in range(nFrames):

            print "Time slice: ", iFrame + 1, "/", nFrames

            # Calculate detector pixel corner locations in RA/dec space (needs to be clockwise in RA/dec space! (checked, gives +ve answers).
            frameTimeFlat.fill(tStartFrames[iFrame])
            dPixRA1, dPixDec1, dummy = raDecCalcObject.getRaDec(
                frameTimeFlat, dPixXminFlat, dPixYminFlat
            )  # dPix* should all be flat
            dPixRA2, dPixDec2, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXminFlat, dPixYmaxFlat)
            dPixRA3, dPixDec3, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXmaxFlat, dPixYmaxFlat)
            dPixRA4, dPixDec4, dummy = raDecCalcObject.getRaDec(frameTimeFlat, dPixXmaxFlat, dPixYminFlat)

            # Reshape the flat-array results into arrays matching the detector shape.
            # Default ordering for reshape should just be the reverse of flatten().
            # (Note all this can probably be avoided by just using flat arrays throughout
            # - this is just a bit more intuitive this way at the moment).
            # dPixRA1,dPixDec1 = dPixRA1Flat.reshape(detShape),dPixDec1Flat.reshape(detShape)
            # dPixRA2,dPixDec2 = dPixRA2Flat.reshape(detShape),dPixDec2Flat.reshape(detShape)
            # dPixRA3,dPixDec3 = dPixRA3Flat.reshape(detShape),dPixDec3Flat.reshape(detShape)
            # dPixRA4,dPixDec4 = dPixRA4Flat.reshape(detShape),dPixDec4Flat.reshape(detShape)

            # Normalise to scale where virtual pixel size=1 and origin is the origin of the virtual pixel grid
            dPixNormRA1 = (dPixRA1 - vPixOriginRA) / vPixSize  # dPixNorm* should all be flat.
            dPixNormRA2 = (dPixRA2 - vPixOriginRA) / vPixSize
            dPixNormRA3 = (dPixRA3 - vPixOriginRA) / vPixSize
            dPixNormRA4 = (dPixRA4 - vPixOriginRA) / vPixSize
            dPixNormDec1 = (dPixDec1 - vPixOriginDec) / vPixSize
            dPixNormDec2 = (dPixDec2 - vPixOriginDec) / vPixSize
            dPixNormDec3 = (dPixDec3 - vPixOriginDec) / vPixSize
            dPixNormDec4 = (dPixDec4 - vPixOriginDec) / vPixSize

            # Get min and max RA/decs for each of the detector pixels
            dPixCornersRA = np.array(
                [dPixNormRA1, dPixNormRA2, dPixNormRA3, dPixNormRA4]
            )  # 2D array, 4 by nRow*nCol - should be clockwise, I think!
            dPixCornersDec = np.array([dPixNormDec1, dPixNormDec2, dPixNormDec3, dPixNormDec4])
            # dPixCornersRA = np.array([dPixNormRA4,dPixNormRA3,dPixNormRA2,dPixNormRA1])      #2D array, 4 by nRow*nCol - reversed, but gives -ve results, so prob. anti-clockwise....
            # dPixCornersDec = np.array([dPixNormDec4,dPixNormDec3,dPixNormDec2,dPixNormDec1])
            dPixRANormMin = dPixCornersRA.min(axis=0)  # Flat 1D array, nRow * nCol
            dPixRANormMax = dPixCornersRA.max(axis=0)
            dPixDecNormMin = dPixCornersDec.min(axis=0)
            dPixDecNormMax = dPixCornersDec.max(axis=0)

            # Get array of effective exposure times for each detector pixel based on the hot pixel time mask
            # Multiply by the bad pixel mask and the flatcal mask so that non-functioning pixels have zero exposure time.
            # Flatten the array in the same way as the previous arrays (1D array, nRow*nCol elements).
            # detExpTimes = (hp.getEffIntTimeImage(photList.hotPixTimeMask, integrationTime=tEndFrames[iFrame]-tStartFrames[iFrame],
            #                                     firstSec=tStartFrames[iFrame]) * detPixMask).flatten()
            detExpTimes = (
                photList.hotPixTimeMask.getEffIntTimeImage(
                    firstSec=tStartFrames[iFrame], integrationTime=tEndFrames[iFrame] - tStartFrames[iFrame]
                )
                * detPixMask
            ).flatten()

            # Loop over the detector pixels.... (should be faster than looping over virtual pixels)
            for iDPix in np.arange(nDPixRow * nDPixCol):
                # Find the pixels which are likely to be overlapping (note - could do this as a sorted search to make things faster)
                maybeOverlappingRA = np.where(
                    (dPixRANormMax[iDPix] > vPixRANormMin) & (dPixRANormMin[iDPix] < vPixRANormMax)
                )[0]
                maybeOverlappingDec = np.where(
                    (dPixDecNormMax[iDPix] > vPixDecNormMin) & (dPixDecNormMin[iDPix] < vPixDecNormMax)
                )[0]

                for overlapLocRA in maybeOverlappingRA:
                    for overlapLocDec in maybeOverlappingDec:
                        # NB Boxer needs its input coordinates in *clockwise* direction; otherwise output behaviour is unspecified
                        # (though looks like it just gives -ve results. Could put an 'abs' in front of it to save bother, but
                        # not sure I'd want to guarantee that's safe)
                        overlapFrac = boxer.boxer(
                            overlapLocDec, overlapLocRA, dPixCornersDec[:, iDPix], dPixCornersRA[:, iDPix]
                        )
                        expTimeToAdd = overlapFrac * detExpTimes[iDPix]
                        vExpTimesStack[overlapLocDec, overlapLocRA, iFrame] += expTimeToAdd

        # ------------ End loop through time steps ----------

        # Sum up the exposure times from each frame:
        vExpTimes = np.sum(vExpTimesStack, axis=2)
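        # i.e. for each virtual pixel (d, r):
        #   vExpTimes[d, r] = sum over frames and detector pixels of
        #                     overlapFrac(pixel -> (d, r); frame) * detExpTimes[pixel]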

        # Check that wherever the exposure time is zero, there are no photons that have not been rejected
        assert np.all(thisImage[vExpTimes == 0] == 0)
        # print 'Dunno why, but it passed the assertion...'

        if savePreStackImage is not None:
            print "Saving exp.time weighted pre-stacked image to " + savePreStackImage
            print "cmap: ", mpl.cm.gray
            imToSave = thisImage / vExpTimes
            mpl.imsave(
                fname=savePreStackImage,
                arr=imToSave,
                origin="lower",
                cmap=mpl.cm.gray,
                vmin=np.percentile(imToSave, 1.0),
                vmax=np.percentile(imToSave, 99.0),
            )

        if self.imageIsLoaded is False or doStack is False:
            self.image = thisImage  # For now, let's keep it this way.... Since weighting does odd things.
            self.effIntTimes = vExpTimes
            self.totExpTime = lastSec - firstSec
            self.expTimeWeights = self.totExpTime / self.effIntTimes
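            # expTimeWeights > 1 wherever a virtual pixel saw less than the full requested
            # integration; multiplying the image by these weights renormalises each pixel
            # to the nominal total exposure time.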
            self.vExpTimesStack = vExpTimesStack  # TEMPORARY FOR DEBUGGING PURPOSES
            self.imageIsLoaded = True
        else:
            assert self.imageIsLoaded == True
            print "Stacking"
            self.image += thisImage
            self.effIntTimes += vExpTimes
            self.totExpTime += lastSec - firstSec
            self.expTimeWeights = self.totExpTime / self.effIntTimes

        print "Image load done. Time taken (s): ", time.clock() - tic
    def loadImage(self,photList,firstSec=0,integrationTime=-1,wvlMin=None,wvlMax=None,
                  doStack=False,        #expWeightTimeStep=None, 
                  savePreStackImage=None, doWeighted=True):  #savePreStackImage is temporary for test purposes
        '''
        
        Build a de-rotated stacked image from a photon list (PhotList) object.
        If the RADecImage instance already contains an image, the new image is added to it.
        
        INPUTS:
            photList - a PhotList object from which to construct the image.
            firstSec - time from start of exposure to start the 'integration' for the image (seconds)
            integrationTime - duration of integration time to include in the image (in seconds; -1 or NaN => to end of exposure)
            wvlMin, wvlMax - min and max wavelengths of photons to include in the image (Angstroms).
            doStack - boolean; if True, then stack the image to be loaded on top of any image data already present.
            
            #### DEPRECATED - NOW GETS TIME STEPS STRAIGHT FROM CENTROID LIST FILES #####
            expWeightTimeStep - see __init__. If set here, overrides any value already set in the RADecImage object.
                                If the new image is being stacked on top of a current image, a new value can be
                                supplied that is different from the current image's value; but only the last value used
                                (i.e. the one supplied) will be stored in the class attribute.
            ################################
            
            savePreStackImage - temporary fudge, set to a file-name to save the image out to a file prior to stacking.
            doWeighted - if True, includes flat and flux weighting (i.e. flatfielding and spectral response) factors from photons,
                                and rejects photons from pixels where the flatfield is bad at any wavelength within the requested
                                wavelength range (all wavelengths if wvlMin/wvlMax not specified).
                                ****NOTE - FLUX WEIGHTING NOT FULLY TESTED -- but looks probably okay.****
        '''
        
        #posErr = 0.8    #Approx. position error in arcsec (just a fixed estimate for now, will improve later)
        #posErr *= 2*np.pi/(60.*60.*360.)  #Convert to radians
        
        imLoadTic = time.clock()
        
        photTable = photList.file.root.photons.photons   #Shortcut to table
        #if expWeightTimeStep is not None:
        #    self.expWeightTimeStep=expWeightTimeStep
        
        if wvlMin is not None and wvlMax is None: wvlMax = np.inf
        if wvlMin is None and wvlMax is not None: wvlMin = 0.0
        
        #Figure out last second of integration
        obsFileExpTime = photList.header.cols.exptime[0]
        if integrationTime==-1 or firstSec+integrationTime > obsFileExpTime:
            lastSec = obsFileExpTime
        else:
            lastSec = firstSec+integrationTime
       
        #If virtual coordinate grid is not yet defined, figure it out.
        if self.gridRA is None or self.gridDec is None:
            #Find RA/dec range needed, taking advantage of the fact that the ra/dec columns are (or should be) indexed....
            print 'Finding RA/dec ranges' 
            self.raMin = photTable.cols.ra[photTable.colindexes['ra'][0]]
            self.raMax = photTable.cols.ra[photTable.colindexes['ra'][-1]]
            self.decMin = photTable.cols.dec[photTable.colindexes['dec'][0]]
            self.decMax = photTable.cols.dec[photTable.colindexes['dec'][-1]]
            self.cenRA = (self.raMin+self.raMax)/2.0
            self.cenDec = (self.decMin+self.decMax)/2.0
            #Set size of virtual grid to accommodate.
            if self.nPixRA is None:
                #+1 for round up; +1 because coordinates are the boundaries of the virtual pixels, not the centers.
                self.nPixRA = int((self.raMax-self.raMin)//self.vPlateScale + 2)     
            if self.nPixDec is None:
                self.nPixDec = int((self.decMax-self.decMin)//self.vPlateScale + 2)
            self.setCoordGrid()
            
        #Short-hand notations for no. of detector and virtual pixels, just for clarity:
        nDPixRow,nDPixCol = photList.nRow,photList.nCol
        nVPixRA,nVPixDec = self.nPixRA,self.nPixDec
        
        #Calculate ratio of virtual pixel area to detector pixel area
        vdPixAreaRatio = (self.vPlateScale/self.detPlateScale)**2
        
        #Make a boolean mask of dead (non functioning for whatever reason) pixels
        #True (1) = good; False (0) = dead 
        #First on the basis of the wavelength cals:
        wvlCalFlagImage = photList.getBadWvlCalFlags()
        deadPixMask = np.where(wvlCalFlagImage == pipelineFlags.waveCal['good'], 1, 0)   #1.0 where flag is good; 0.0 otherwise. (Straight boolean mask would work, but not guaranteed for Python 4....)

        #Next on the basis of the flat cals (or all ones if weighting not requested)
        if doWeighted:
            flatCalFlagArray = photList.file.root.flatcal.flags.read()       # 3D array - nRow * nCol * nWavelength Bins.
            flatWvlBinEdges = photList.file.root.flatcal.wavelengthBins.read()   # 1D array of wavelength bin edges for the flat cal.
            lowerEdges = flatWvlBinEdges[0:-1]
            upperEdges = flatWvlBinEdges[1:]
            if wvlMin is None and wvlMax is None:
                inRange = np.ones(len(lowerEdges),dtype=bool)   # (all bins in range implies all True)
            else:
                inRange = ((lowerEdges >= wvlMin) & (lowerEdges < wvlMax) |
                           (upperEdges >= wvlMin) & (lowerEdges < wvlMax))
            flatCalMask = np.where(np.all(flatCalFlagArray[:,:,inRange]==False, axis=2), 1, 0) # Should be zero where any pixel has a bad flag at any wavelength within the requested range; one otherwise. Spot checked, seems to work.
        else:
            flatCalMask = np.ones((nDPixRow,nDPixCol))
        
        #If hot pixels time-mask data not already parsed in, then parse it.
        if photList.hotPixTimeMask is None:
            photList.parseHotPixTimeMask()      #Loads time mask dictionary into photList.hotPixTimeMask
         
        #First find start/end times of each timestep ('frame') for calculating effective exp. times
        #and for subdividing the image data (the latter is only needed for the purposes of
        #splitting the data into small chunks so it'll fit in memory easily).
        #Use the same timesteps as used in calculating the astrometry.
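        # (The per-frame photTable.readWhere() calls in the loop below, together with
        # deleting the photon array once its columns have been pulled out, are what keep
        # the memory footprint small.)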

        tStartFramesAll = np.array(photList.file.root.centroidList.times.read()) #Convert to array, since it's saved as a list.
        tEndFramesAll = np.append(tStartFramesAll[1:], np.inf)                   #Last frame goes on forever as far as we know at the moment
        withinIntegration = ((tStartFramesAll < lastSec) & (tEndFramesAll > firstSec))
        tStartFrames = tStartFramesAll[withinIntegration].clip(min=firstSec)     #Now clip so that everything is within the requested integration time.
        tEndFrames = tEndFramesAll[withinIntegration].clip(max=lastSec)
        nFrames = len(tStartFrames)
        assert nFrames > 0      #Otherwise we have a problem....
        assert np.all(tStartFrames <= lastSec) and np.all(tEndFrames >= firstSec)
        
        #Get x,y locations of detector pixel corners (2D array of each x,y value, in detector space)
        dPixXmin = np.indices((nDPixRow,nDPixCol))[1] - 0.5
        dPixXmax = np.indices((nDPixRow,nDPixCol))[1] + 0.5
        dPixYmin = np.indices((nDPixRow,nDPixCol))[0] - 0.5
        dPixYmax = np.indices((nDPixRow,nDPixCol))[0] + 0.5
        dPixXminFlat = dPixXmin.flatten()   #Flattened versions of the same since getRaDec() only works on flat arrays.
        dPixXmaxFlat = dPixXmax.flatten()
        dPixYminFlat = dPixYmin.flatten()
        dPixYmaxFlat = dPixYmax.flatten()
        
        #Create (1D) arrays for normalised center locations of virtual pixel grid (=index numbers, representing location of unit squares)
        vPixRANormCen = np.arange(nVPixRA)   #np.indices(nVPixDec,nVPixRA)[1]
        vPixDecNormCen = np.arange(nVPixDec) #np.indices(nVPixDec,nVPixRA)[0]
        
        #Create 1D arrays marking edges of virtual pixels (in 'normalised' space...)
        vPixRANormMin = np.arange(nVPixRA)-0.5
        vPixRANormMax = np.arange(nVPixRA)+0.5
        vPixDecNormMin = np.arange(nVPixDec)-0.5
        vPixDecNormMax = np.arange(nVPixDec)+0.5
        
        #Find origin of virtual array (center of virtual pixel 0,0) in RA/dec space.
        vPixOriginRA = np.mean(self.gridRA[0:2])     
        vPixOriginDec = np.mean(self.gridDec[0:2])
        vPixSize = self.vPlateScale       #Short hand, Length of side of virtual pixel in radians (assume square pixels)
        
        #Make arrays to take the total exposure times and image data for each virtual pixel at each time step
        vExpTimesStack = np.zeros((nVPixDec,nVPixRA,nFrames))
        imageStack = np.zeros((nVPixDec,nVPixRA,nFrames))
        
        #And one for the total exposure time at each pixel summed over all time steps
        vExpTimes = np.zeros((nVPixDec,nVPixRA))
        
        #Array to hold list of (equal) timestamps for each pixel at each timestep
        #(just for calculating the RA/dec coordinates of the pixel corners)
        frameTimeFlat = np.zeros((nDPixRow*nDPixCol))   #Also flat array for the purposes of getRaDec()
        frameTimeFlat.fill(np.nan)
        
        #Initialise RA/dec calculations of pixel locations for exposure time weighting
        raDecCalcObject = crd.CalculateRaDec(photList.file.root.centroidList)            
         
        #------------ Loop through the time steps ----------
        for iFrame in range(nFrames):
            
            print 'Time slice: ',iFrame+1, '/', nFrames

            #-------------Make image for this time step-----------
            
            #Get the photons
            print 'Getting photon coords'
            print 'wvlMin, wvlMax: ',wvlMin,wvlMax
            strt, fin = tStartFrames[iFrame], tEndFrames[iFrame]  #Local names because Numexpr can't handle indexing in the query string, it seems
            if wvlMin is None:
                assert wvlMin is None and wvlMax is None
                print '(getting all wavelengths)'
                tic = time.clock()
                photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin)')
                #photons = np.array([row.fetch_all_fields() for row in photTable.where('(arrivalTime>=strt) & (arrivalTime<=fin)')])
                #photIndices = photTable.getWhereList('(arrivalTime>=strt) & (arrivalTime<=fin)')
                print 'Time taken (s): ',time.clock()-tic
            else:
                assert wvlMin is not None and wvlMax is not None
                print '(trimming wavelength range) '
                photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin) & (wavelength>=wvlMin) & (wavelength<=wvlMax)')
            
            #Filter out photons to be masked out on the basis of detector pixel
            print 'Finding bad detector pixels...'
            detPixMask = deadPixMask * flatCalMask      #Combine wave cal pixel mask and flat cal mask (should be the same in an ideal world, but not necessarily in practice)
            whereBad = np.where(detPixMask == 0)
            badXY = pl.xyPack(whereBad[0],whereBad[1])   #Array of packed x-y values for bad pixels (CHECK X,Y THE RIGHT WAY ROUND!)
            allPhotXY = photons['xyPix']                 #Array of packed x-y values for all photons           
            #Get a boolean array indicating photons whose packed x-y coordinate value is in the 'bad' list.
            toReject = np.where(np.in1d(allPhotXY,badXY))[0]      #Zero to take index array out of the returned 1-element tuple.
            #Chuck out the bad photons
            print 'Rejecting photons from bad pixels...'
            photons = np.delete(photons,toReject)
            
            #Pull out needed information
            print 'Pulling out relevant columns'
            photRAs = photons['ra']       #Read all photon coords into an RA and a dec array.
            photDecs = photons['dec']
            photHAs = photons['ha']       #Along with hour angles...
            photWeights = photons['flatWeight'] * photons['fluxWeight']   #********EXPERIMENTING WITH ADDING FLUX WEIGHT - NOT FULLY TESTED, BUT SEEMS OKAY....********
            print 'INCLUDING FLUX WEIGHTS!'
            photWavelengths = photons['wavelength']
            photXPixs = photons['xPix']   #Keep detector-coordinate columns for the optional diagnostic image below
            photYPixs = photons['yPix']
            del(photons)                  #Not needed till next iteration, and it takes up a lot of memory....
            
            if wvlMin is not None or wvlMax is not None:
                assert all(photWavelengths>=wvlMin) and all(photWavelengths<=wvlMax)
            print 'Min, max photon wavelengths found: ', np.min(photWavelengths), np.max(photWavelengths)
            nPhot = len(photRAs)
            
            
            #Add uniform random dither to each photon, distributed over a square 
            #area of the same size and orientation as the originating pixel at 
            #the time of observation.
            xRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0
            yRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0       #Not the same array!
            ditherRAs = xRand*np.cos(photHAs) - yRand*np.sin(photHAs)
            ditherDecs = yRand*np.cos(photHAs) + xRand*np.sin(photHAs)
            
            photRAs=photRAs+ditherRAs
            photDecs=photDecs+ditherDecs
            
            #Make the image for this time slice
            if doWeighted:
                print 'Making weighted image'
                imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA],
                                                                          weights=photWeights)
            else:
                print 'Making unweighted image'        
                imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA])
                    
            if savePreStackImage is not None:
                saveName = 'det'+str(strt)+'-'+str(fin)+'s-'+savePreStackImage
                print 'Making det-frame image for diagnostics: '+saveName
                detImSlice = np.histogram2d(photYPixs,photXPixs,bins=[nDPixRow,nDPixCol])[0]
                mpl.imsave(fname=saveName,arr=detImSlice,origin='lower',
                           cmap=mpl.cm.gray,vmin=np.percentile(detImSlice, 0.5), vmax=np.percentile(detImSlice,99.5))

            #----------Now start figuring out effective exposure times for each virtual pixel----------------
             
            #And start figuring out the exposure time weights....            
            print 'Calculating effective exposure times'
                    
            #Calculate detector pixel corner locations in RA/dec space (needs to be clockwise in RA/dec space! (checked, gives +ve answers).
            frameTimeFlat.fill(tStartFrames[iFrame])
            dPixRA1,dPixDec1,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYminFlat)      #dPix* should all be flat
            dPixRA2,dPixDec2,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYmaxFlat)   
            dPixRA3,dPixDec3,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYmaxFlat)
            dPixRA4,dPixDec4,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYminFlat)

            #Normalise to scale where virtual pixel size=1 and origin is the origin of the virtual pixel grid
            dPixNormRA1 = (dPixRA1 - vPixOriginRA)/vPixSize     #dPixNorm* should all be flat.
            dPixNormRA2 = (dPixRA2 - vPixOriginRA)/vPixSize
            dPixNormRA3 = (dPixRA3 - vPixOriginRA)/vPixSize
            dPixNormRA4 = (dPixRA4 - vPixOriginRA)/vPixSize
            dPixNormDec1 = (dPixDec1 - vPixOriginDec)/vPixSize
            dPixNormDec2 = (dPixDec2 - vPixOriginDec)/vPixSize
            dPixNormDec3 = (dPixDec3 - vPixOriginDec)/vPixSize
            dPixNormDec4 = (dPixDec4 - vPixOriginDec)/vPixSize
                
            #Get min and max RA/decs for each of the detector pixels    
            dPixCornersRA = np.array([dPixNormRA1,dPixNormRA2,dPixNormRA3,dPixNormRA4])      #2D array, 4 by nRow*nCol - should be clockwise, I think!
            dPixCornersDec = np.array([dPixNormDec1,dPixNormDec2,dPixNormDec3,dPixNormDec4])
            #dPixCornersRA = np.array([dPixNormRA4,dPixNormRA3,dPixNormRA2,dPixNormRA1])      #2D array, 4 by nRow*nCol - reversed, but gives -ve results, so prob. anti-clockwise....
            #dPixCornersDec = np.array([dPixNormDec4,dPixNormDec3,dPixNormDec2,dPixNormDec1])
            dPixRANormMin = dPixCornersRA.min(axis=0)     #Flat 1D array, nRow * nCol
            dPixRANormMax = dPixCornersRA.max(axis=0)
            dPixDecNormMin = dPixCornersDec.min(axis=0)
            dPixDecNormMax = dPixCornersDec.max(axis=0)

            #Get array of effective exposure times for each detector pixel based on the hot pixel time mask
            #Multiply by the bad pixel mask and the flatcal mask so that non-functioning pixels have zero exposure time.
            #Flatten the array in the same way as the previous arrays (1D array, nRow*nCol elements).
            detExpTimes = (hp.getEffIntTimeImage(photList.hotPixTimeMask, integrationTime=tEndFrames[iFrame]-tStartFrames[iFrame],
                                                 firstSec=tStartFrames[iFrame]) * detPixMask).flatten()
            
            
            #Loop over the detector pixels and accumulate the exposure time that falls in each
            #tic = time.clock()
            for iDPix in np.arange(nDPixRow * nDPixCol):
                #Find the pixels which are likely to be overlapping (note - could do this as a sorted search to make things faster)
                maybeOverlappingRA = np.where((dPixRANormMax[iDPix] > vPixRANormMin) & (dPixRANormMin[iDPix] < vPixRANormMax))[0]
                maybeOverlappingDec = np.where((dPixDecNormMax[iDPix] > vPixDecNormMin) & (dPixDecNormMin[iDPix] < vPixDecNormMax))[0]

                for overlapLocRA in maybeOverlappingRA:
                    for overlapLocDec in maybeOverlappingDec:
                        overlapFrac = boxer.boxer(overlapLocDec,overlapLocRA,dPixCornersDec[:,iDPix],dPixCornersRA[:,iDPix])
                        expTimeToAdd = overlapFrac*detExpTimes[iDPix]
                        vExpTimesStack[overlapLocDec,overlapLocRA,iFrame] += expTimeToAdd

            #print 'Time taken (s): ',time.clock()-tic

        #------------ End loop through time steps ----------

                
        #Sum up the exposure times from each frame:
        vExpTimes = np.sum(vExpTimesStack,axis=2)
        thisImage = np.sum(imageStack,axis=2)
        
        #Check that wherever the exposure time is zero, there are no photons that have not been rejected
        #assert np.all(thisImage[vExpTimes==0] == 0)
        #assert 1==0
        
        #Temporary for testing-------------
        if savePreStackImage is not None:
            print 'Saving pre-stacked image to '+savePreStackImage
            mpl.imsave(fname=savePreStackImage,arr=thisImage,origin='lower',cmap=mpl.cm.gray,
                       vmin=np.percentile(thisImage, 0.5), vmax=np.percentile(thisImage,99.5))
        #---------------------------------
        
        if self.imageIsLoaded is False or doStack is False:
            self.image = thisImage           #For now, let's keep it this way.... Since weighting does odd things.
            self.effIntTimes = vExpTimes
            self.totExpTime = lastSec-firstSec
            self.expTimeWeights = self.totExpTime/self.effIntTimes
            self.vExpTimesStack = vExpTimesStack                   #TEMPORARY FOR DEBUGGING PURPOSES
            self.imageIsLoaded = True
        else:
            assert self.imageIsLoaded == True
            print 'Stacking'
            self.image += thisImage
            self.effIntTimes += vExpTimes
            self.totExpTime += lastSec-firstSec
            self.expTimeWeights = self.totExpTime/self.effIntTimes

        print 'Image load done. Time taken (s): ', time.clock()-imLoadTic