Example #1
    def Loadh5(self, filename):
        '''Load PYME's semi-custom HDF5 image data format. Offloads all the
        hard work to the HDFDataSource class'''
        import tables
        from PYME.Analysis.DataSources import HDFDataSource, BGSDataSource
        from PYME.Analysis.LMVis import inpFilt

        #open hdf5 file
        self.dataSource = HDFDataSource.DataSource(filename, None)
        #chain on a background subtraction data source, so we can easily do
        #background subtraction in the GUI the same way as in the analysis
        self.data = BGSDataSource.DataSource(
            self.dataSource)  #this will get replaced with a wrapped version

        if 'MetaData' in self.dataSource.h5File.root:  #should be true the whole time
            self.mdh = MetaData.TIRFDefault
            self.mdh.copyEntriesFrom(
                MetaDataHandler.HDFMDHandler(self.dataSource.h5File))
        else:
            self.mdh = MetaData.TIRFDefault
            wx.MessageBox(
                "Carrying on with defaults - no gaurantees it'll work well",
                'ERROR: No metadata found in file ...', wx.OK)
            print(
                "ERROR: No metadata fond in file ... Carrying on with defaults - no gaurantees it'll work well"
            )

        #attempt to estimate any missing parameters from the data itself
        MetaData.fillInBlanks(self.mdh, self.dataSource)

        #calculate the name to use when we do batch analysis on this
        from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        #try and find a previously performed analysis
        fns = filename.split(os.path.sep)
        cand = os.path.sep.join(fns[:-2] + [
            'analysis',
        ] + fns[-2:]) + 'r'
        print(cand)
        if False:  #os.path.exists(cand):
            h5Results = tables.openFile(cand)

            if 'FitResults' in dir(h5Results.root):
                self.fitResults = h5Results.root.FitResults[:]
                self.resultsSource = inpFilt.h5rSource(h5Results)

                self.resultsMdh = MetaData.TIRFDefault
                self.resultsMdh.copyEntriesFrom(
                    MetaDataHandler.HDFMDHandler(h5Results))

        self.events = self.dataSource.getEvents()
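For reference, a minimal sketch of the same open-and-wrap pattern used above, outside of a GUI class. The file path is hypothetical; the import paths follow those that appear elsewhere in these examples.

# Hypothetical stand-alone use of the pattern in Loadh5 above.
from PYME.Analysis.DataSources import HDFDataSource, BGSDataSource
from PYME.Acquire import MetaDataHandler

raw = HDFDataSource.DataSource('/path/to/series.h5', None)  # open the PYME HDF5 series
data = BGSDataSource.DataSource(raw)                        # wrap for on-the-fly background subtraction
mdh = MetaDataHandler.HDFMDHandler(raw.h5File)              # metadata handler backed by the same HDF5 file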
Example #2
    def __init__(self, pth, mode='r'):
        if mode in ['w', 'a', 'r+'] and os.path.exists(pth):
            raise RuntimeError('Cannot open existing file in write mode')
        self.h5f = tables.openFile(pth, mode)
        self.mode = mode

        self.complevel = 6
        self.complib = 'zlib'

        self.mdh = MetaDataHandler.CachingMDHandler(
            MetaDataHandler.HDFMDHandler(self.h5f))

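        # the ImageData array in the HDF5 file is stored frame-first, i.e. (nFrames, x, y);
        # dshape reorders this to (x, y, nFrames)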
        if 'ImageData' in dir(self.h5f.root):
            self.dshape = [
                self.h5f.root.ImageData.shape[1],
                self.h5f.root.ImageData.shape[2],
                self.h5f.root.ImageData.shape[0]
            ]
        else:
            self.dshape = [0, 0, 0]

        if 'Events' in dir(self.h5f.root):
            self.nEvents = self.h5f.root.Events.shape[0]
        else:
            self.nEvents = 0

            if mode == 'w':
                self._checkCreateEventsTable()
Example #3
def getImageTags(filename):
    ext = os.path.splitext(filename)[1]
    #print ext

    tags = []

    try:
        if ext in ['.h5', '.h5r']:
            h5f = tables.openFile(filename)
            if 'Events' in dir(h5f.root):
                events = h5f.root.Events[:]

                evKeyNames = set()
                for e in events:
                    evKeyNames.add(e['EventName'])

                if 'ProtocolFocus' in evKeyNames:
                    tags.append('Z-Stack')

            md = MetaDataHandler.HDFMDHandler(h5f)

            if 'Protocol.Filename' in md.getEntryNames():
                tags.append('Protocol_%s' % md.getEntry('Protocol.Filename'))

            h5f.close()
    except:
        pass

    return tags
Example #4
def genImageID(filename, guess=False):
    ext = os.path.splitext(filename)[1]
    #print ext

    try:
        if ext == '.h5':
            return genDataFileID(filename)
        elif ext == '.h5r':
            h5f = tables.openFile(filename)
            md = MetaDataHandler.HDFMDHandler(h5f)

            if 'Analysis.DataFileID' in md.getEntryNames():
                ret = md.getEntry('Analysis.DataFileID')
            elif guess:
                ret = guessH5RImageID(filename)
                #print ret
            else:
                ret = None
            #print guess, ret

            h5f.close()
            return ret
        else:
            return hashString32(filename)
    except:
        return hashString32(filename)
Example #5
def generateThumbnail(inputFile, thumbSize):
    global size
    #logging.debug('Input File: %s\n' % inputFile)
    #logging.debug('Output File: %s\n' % outputFile)
    #logging.debug('Thumb Size: %s\n' % thumbSize)

    h5f = tables.openFile(inputFile)

    dataSource = HDFDataSource.DataSource(inputFile, None)

    md = MetaData.genMetaDataFromSourceAndMDH(
        dataSource, MetaDataHandler.HDFMDHandler(h5f))

    xsize = h5f.root.ImageData.shape[1]
    ysize = h5f.root.ImageData.shape[2]

    if xsize > ysize:
        zoom = float(thumbSize) / xsize
    else:
        zoom = float(thumbSize) / ysize

    size = (int(xsize * zoom), int(ysize * zoom))

    im = h5f.root.ImageData[min(md.EstimatedLaserOnFrameNo + 10,
                                (h5f.root.ImageData.shape[0] -
                                 1)), :, :].astype('f')

    im = im.T - min(md.Camera.ADOffset, im.min())

    h5f.close()

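    # scale to the 0-255 range and clip before converting to uint8
    # (maximum/minimum are presumably numpy's, imported elsewhere in the original module)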
    im = maximum(minimum(1 * (255 * im) / im.max(), 255), 0)

    return im.astype('uint8')
Example #6
def extractFramesF(inFile, outFile, start, end, complib='zlib', complevel=9):
    h5in = HDFDataSource(inFile)

    md = MetaDataHandler.HDFMDHandler(h5in.h5File)

    extractFrames(h5in, md, h5in.h5File.filename, outFile, start, end, complib,
                  complevel)

    h5in.release()
Example #7
    def __init__(self, scope, filename, acquisator, protocol=p.NullProtocol, parent=None, complevel=6, complib='zlib'):
        self.h5File = tables.openFile(filename, 'w')

        filt = tables.Filters(complevel, complib, shuffle=True)

        self.imageData = self.h5File.createEArray(self.h5File.root, 'ImageData', tables.UInt16Atom(), (0, scope.cam.GetPicWidth(), scope.cam.GetPicHeight()), filters=filt)
        self.md = MetaDataHandler.HDFMDHandler(self.h5File)
        self.evtLogger = EventLogger(self, scope, self.h5File)

        sp.Spooler.__init__(self, scope, filename, acquisator, protocol, parent)
Example #8
    def __init__(self,
                 name,
                 resultsFilename,
                 initialTasks=[],
                 onEmpty=doNix,
                 fTaskToPop=popZero):
        if resultsFilename is None:
            resultsFilename = genResultFileName(name)

        if os.path.exists(
                resultsFilename):  #bail if output file already exists
            raise RuntimeError('Output file already exists: ' +
                               resultsFilename)

        TaskQueue.__init__(self, name, initialTasks, onEmpty, fTaskToPop)
        self.resultsFilename = resultsFilename

        self.numClosedTasks = 0

        logging.info('Creating results file')

        self.h5ResultsFile = tables.openFile(self.resultsFilename, 'w')

        self.prepResultsFile()  # pass

        #self.fileResultsLock = threading.Lock()
        self.fileResultsLock = tablesLock

        logging.info('Creating results metadata')

        self.resultsMDH = MetaDataHandler.HDFMDHandler(self.h5ResultsFile)
        self.metaData = MetaDataHandler.NestedClassMDHandler()
        #self.metaData = None #MetaDataHandler.NestedClassMDHandler(self.resultsMDH)
        self.metaDataStale = True
        self.MDHCache = []

        logging.info('Creating results events table')
        with self.fileResultsLock.wlock:
            self.resultsEvents = self.h5ResultsFile.createTable(
                self.h5ResultsFile.root,
                'Events',
                SpoolEvent,
                filters=tables.Filters(complevel=5, shuffle=True))

        logging.info('Events table created')

        self.haveResultsTable = False

        self.resultsQueue = []  #Queue.Queue()
        self.resultsQueueLock = threading.Lock()
        self.lastResultsQueuePurge = time.time()

        logging.info('Results file initialised')
Example #9
def extractFrames(dataSource,
                  metadata,
                  origName,
                  outFile,
                  start,
                  end,
                  subsamp=1,
                  complib='zlib',
                  complevel=5):

    h5out = tables.openFile(outFile, 'w')
    filters = tables.Filters(complevel, complib, shuffle=True)

    nframes = end - start
    xSize, ySize = dataSource.getSliceShape()

    ims = h5out.createEArray(h5out.root,
                             'ImageData',
                             tables.UInt16Atom(), (0, xSize, ySize),
                             filters=filters,
                             expectedrows=nframes)
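    # step through the requested range in blocks of `subsamp` frames,
    # summing each block into a single output frame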
    for frameN in range(start, end, subsamp):
        im = dataSource.getSlice(frameN)[None, :, :]
        for fN in range(frameN + 1, frameN + subsamp):
            im += dataSource.getSlice(fN)[None, :, :]
        ims.append(im)
        ims.flush()

    outMDH = MetaDataHandler.HDFMDHandler(h5out)

    outMDH.copyEntriesFrom(metadata)
    outMDH.setEntry('cropping.originalFile', origName)
    outMDH.setEntry('cropping.start', start)
    outMDH.setEntry('cropping.end', end)
    outMDH.setEntry('cropping.averaging', subsamp)

    if 'Camera.ADOffset' in metadata.getEntryNames():
        outMDH.setEntry('Camera.ADOffset',
                        subsamp * metadata.getEntry('Camera.ADOffset'))

    outEvents = h5out.createTable(h5out.root,
                                  'Events',
                                  SpoolEvent,
                                  filters=tables.Filters(complevel=5,
                                                         shuffle=True))

    #copy events to results file
    evts = dataSource.getEvents()
    if len(evts) > 0:
        outEvents.append(evts)

    h5out.flush()
    h5out.close()
Example #10
def genImageTime(filename):
    ext = os.path.splitext(filename)[1]
    #print ext

    try:
        if ext in ['.h5', '.h5r']:
            h5f = tables.openFile(filename)
            md = MetaDataHandler.HDFMDHandler(h5f)

            ret = md.getEntry('StartTime')
            #print guess, ret

            h5f.close()
            return ret
        else:
            return 0
    except:
        return 0
Example #11
def getFileMetadata(filename):
    ext = os.path.splitext(filename)[1]
    #print ext

    mdh = MetaDataHandler.NestedClassMDHandler()

    try:
        if ext in ['.h5', '.h5r']:
            h5f = tables.openFile(filename)
            md = MetaDataHandler.HDFMDHandler(h5f)

            mdh = MetaDataHandler.NestedClassMDHandler(md)
            #print guess, ret

            h5f.close()
    except:
        pass

    return mdh
Example #12
    def __init__(self,
                 name,
                 dataFilename=None,
                 resultsFilename=None,
                 onEmpty=doNix,
                 fTaskToPop=popZero,
                 startAt='guestimate',
                 frameSize=(-1, -1),
                 complevel=6,
                 complib='zlib'):
        if dataFilename is None:
            self.dataFilename = genDataFilename(name)
        else:
            self.dataFilename = dataFilename

        if resultsFilename is None:
            resultsFilename = genResultFileName(self.dataFilename)
        else:
            resultsFilename = resultsFilename

        ffn = getFullFilename(self.dataFilename)

        self.acceptNewTasks = False
        self.releaseNewTasks = False

        self.postTaskBuffer = []

        initialTasks = []

        if os.path.exists(ffn):  #file already exists - read from it
            self.h5DataFile = tables.openFile(ffn, 'r')
            #self.metaData = MetaData.genMetaDataFromHDF(self.h5DataFile)
            self.dataMDH = MetaDataHandler.NestedClassMDHandler(
                MetaDataHandler.HDFMDHandler(self.h5DataFile))
            #self.dataMDH.mergeEntriesFrom(MetaData.TIRFDefault)
            self.imageData = self.h5DataFile.root.ImageData

            if startAt == 'guestimate':  #calculate a suitable starting value
                tLon = self.dataMDH.EstimatedLaserOnFrameNo
                if tLon == 0:
                    startAt = 0
                else:
                    startAt = tLon + 10

            if startAt == 'notYet':
                initialTasks = []
            else:
                initialTasks = list(
                    range(startAt, self.h5DataFile.root.ImageData.shape[0]))

            self.imNum = len(self.imageData)
            self.dataRW = False

        else:  #make ourselves a new file
            self.h5DataFile = tables.openFile(ffn, 'w')
            filt = tables.Filters(complevel, complib, shuffle=True)

            self.imageData = self.h5DataFile.createEArray(
                self.h5DataFile.root,
                'ImageData',
                tables.UInt16Atom(), (0, ) + tuple(frameSize),
                filters=filt,
                chunkshape=(1, ) + tuple(frameSize))
            self.events = self.h5DataFile.createTable(self.h5DataFile.root,
                                                      'Events',
                                                      SpoolEvent,
                                                      filters=filt)
            self.imNum = 0
            self.acceptNewTasks = True

            self.dataMDH = MetaDataHandler.HDFMDHandler(self.h5DataFile)
            self.dataMDH.mergeEntriesFrom(MetaData.TIRFDefault)
            self.dataRW = True

        HDFResultsTaskQueue.__init__(self, name, resultsFilename, initialTasks,
                                     onEmpty, fTaskToPop)

        self.resultsMDH.copyEntriesFrom(self.dataMDH)
        self.metaData.copyEntriesFrom(self.resultsMDH)

        #copy events to results file
        if len(self.h5DataFile.root.Events) > 0:
            self.resultsEvents.append(self.h5DataFile.root.Events[:])

        self.queueID = name

        self.numSlices = self.imageData.shape[0]

        #self.dataFileLock = threading.Lock()
        self.dataFileLock = tablesLock
        #self.getTaskLock = threading.Lock()
        self.lastTaskTime = 0
Example #13
    def Export(self,
               data,
               outFile,
               xslice,
               yslice,
               zslice,
               metadata=None,
               events=None,
               origName=None):
        h5out = tables.openFile(outFile, 'w')
        filters = tables.Filters(self.complevel, self.complib, shuffle=True)

        nframes = (zslice.stop - zslice.start) / zslice.step

        xSize, ySize = data[xslice, yslice, 0].shape[:2]

        print((xSize, ySize))

        #atm = tables.UInt16Atom()
        atm = tables.Atom.from_dtype(data[xslice, yslice, 0].dtype)

        ims = h5out.createEArray(
            h5out.root,
            'ImageData',
            atm, (0, xSize, ySize),
            filters=filters,
            expectedrows=nframes)  #, chunkshape=(1,xSize,ySize))

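        # frames within each z step are summed into a single output frame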
        for frameN in range(zslice.start, zslice.stop, zslice.step):
            im = data[xslice, yslice, frameN].squeeze()

            for fN in range(frameN + 1, frameN + zslice.step):
                im += data[xslice, yslice, fN].squeeze()

            if im.ndim == 1:
                im = im.reshape((-1, 1))[None, :, :]
            else:
                im = im[None, :, :]

            #print im.shape
            ims.append(im)
            #ims.flush()

        ims.flush()

        outMDH = MetaDataHandler.HDFMDHandler(h5out)

        if not metadata == None:
            outMDH.copyEntriesFrom(metadata)

            if 'Camera.ADOffset' in metadata.getEntryNames():
                outMDH.setEntry(
                    'Camera.ADOffset',
                    zslice.step * metadata.getEntry('Camera.ADOffset'))

        if not origName == None:
            outMDH.setEntry('cropping.originalFile', origName)

        outMDH.setEntry('cropping.xslice', xslice.indices(data.shape[0]))
        outMDH.setEntry('cropping.yslice', yslice.indices(data.shape[1]))
        outMDH.setEntry('cropping.zslice', zslice.indices(data.shape[2]))

        outEvents = h5out.createTable(h5out.root,
                                      'Events',
                                      SpoolEvent,
                                      filters=tables.Filters(complevel=5,
                                                             shuffle=True))

        if not events == None:
            #copy events to results file
            if len(events) > 0:
                outEvents.append(events)

        h5out.flush()
        h5out.close()
Example #14
import sys
import tables

from PYME.Acquire import MetaDataHandler

import storm_analysis.sa_library.readinsight3 as readinsight3

if (len(sys.argv) != 4):
    print("usage: <bin file> <h5r file> <pixel size (nm)>")
    exit()

# Create the h5r file.
h5ResultsFile = tables.openFile(sys.argv[2], 'w')

# Create an empty metadata section.
resultsMDH = MetaDataHandler.HDFMDHandler(h5ResultsFile)


# Create an empty events table.
class SpoolEvent(tables.IsDescription):
    EventName = tables.StringCol(32)
    Time = tables.Time64Col()
    EventDescr = tables.StringCol(256)


resultsEvents = h5ResultsFile.createTable(h5ResultsFile.root,
                                          'Events',
                                          SpoolEvent,
                                          filters=tables.Filters(complevel=5,
                                                                 shuffle=True))
Example #15
    def OpenFile(self, filename):
        self.dataSources = []
        if 'zm' in dir(self):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.filename = filename

        self.selectedDataSource = inpFilt.h5rSource(filename)
        self.dataSources.append(self.selectedDataSource)

        self.mdh = MetaDataHandler.HDFMDHandler(self.selectedDataSource.h5f)

        if 'Camera.ROIWidth' in self.mdh.getEntryNames():
            x0 = 0
            y0 = 0

            x1 = self.mdh.getEntry(
                'Camera.ROIWidth') * 1e3 * self.mdh.getEntry('voxelsize.x')
            y1 = self.mdh.getEntry(
                'Camera.ROIHeight') * 1e3 * self.mdh.getEntry('voxelsize.y')

            if 'Splitter' in self.mdh.getEntry('Analysis.FitModule'):
                y1 = y1 / 2

            self.imageBounds = ImageBounds(x0, y0, x1, y1)
        else:
            self.imageBounds = ImageBounds.estimateFromSource(
                self.selectedDataSource)

        if 'fitResults_Ag' in self.selectedDataSource.keys():
            #if we used the splitter set up a mapping so we can filter on total amplitude and ratio
            #if not 'fitError_Ag' in self.selectedDataSource.keys():

            if 'fitError_Ag' in self.selectedDataSource.keys():
                self.selectedDataSource = inpFilt.mappingFilter(
                    self.selectedDataSource,
                    A='fitResults_Ag + fitResults_Ar',
                    gFrac='fitResults_Ag/(fitResults_Ag + fitResults_Ar)',
                    error_gFrac=
                    'sqrt((fitError_Ag/fitResults_Ag)**2 + (fitError_Ag**2 + fitError_Ar**2)/(fitResults_Ag + fitResults_Ar)**2)*fitResults_Ag/(fitResults_Ag + fitResults_Ar)'
                )
                sg = self.selectedDataSource['fitError_Ag']
                sr = self.selectedDataSource['fitError_Ar']
                g = self.selectedDataSource['fitResults_Ag']
                r = self.selectedDataSource['fitResults_Ar']
                I = self.selectedDataSource['A']
                self.selectedDataSource.colNorm = np.sqrt(
                    2 * np.pi) * sg * sr / (2 * np.sqrt(sg**2 + sr**2) * I) * (
                        scipy.special.erf(
                            (sg**2 * r + sr**2 * (I - g)) /
                            (np.sqrt(2) * sg * sr * np.sqrt(sg**2 + sr**2))) -
                        scipy.special.erf(
                            (sg**2 * (r - I) - sr**2 * g) /
                            (np.sqrt(2) * sg * sr * np.sqrt(sg**2 + sr**2))))
                self.selectedDataSource.setMapping('ColourNorm', '1.0*colNorm')
            else:
                self.selectedDataSource = inpFilt.mappingFilter(
                    self.selectedDataSource,
                    A='fitResults_Ag + fitResults_Ar',
                    gFrac='fitResults_Ag/(fitResults_Ag + fitResults_Ar)',
                    error_gFrac='0*x + 0.01')
                self.selectedDataSource.setMapping('fitError_Ag',
                                                   '1*sqrt(fitResults_Ag/1)')
                self.selectedDataSource.setMapping('fitError_Ar',
                                                   '1*sqrt(fitResults_Ar/1)')
                sg = self.selectedDataSource['fitError_Ag']
                sr = self.selectedDataSource['fitError_Ar']
                g = self.selectedDataSource['fitResults_Ag']
                r = self.selectedDataSource['fitResults_Ar']
                I = self.selectedDataSource['A']
                self.selectedDataSource.colNorm = np.sqrt(
                    2 * np.pi) * sg * sr / (2 * np.sqrt(sg**2 + sr**2) * I) * (
                        scipy.special.erf(
                            (sg**2 * r + sr**2 * (I - g)) /
                            (np.sqrt(2) * sg * sr * np.sqrt(sg**2 + sr**2))) -
                        scipy.special.erf(
                            (sg**2 * (r - I) - sr**2 * g) /
                            (np.sqrt(2) * sg * sr * np.sqrt(sg**2 + sr**2))))
                self.selectedDataSource.setMapping('ColourNorm', '1.0*colNorm')

            self.dataSources.append(self.selectedDataSource)

        elif 'fitResults_sigxl' in self.selectedDataSource.keys():
            self.selectedDataSource = inpFilt.mappingFilter(
                self.selectedDataSource)
            self.dataSources.append(self.selectedDataSource)

            self.selectedDataSource.setMapping(
                'sig', 'fitResults_sigxl + fitResults_sigyu')
            self.selectedDataSource.setMapping(
                'sig_d', 'fitResults_sigxl - fitResults_sigyu')

            self.selectedDataSource.dsigd_dz = -30.
            self.selectedDataSource.setMapping('fitResults_z0',
                                               'dsigd_dz*sig_d')
        else:
            self.selectedDataSource = inpFilt.mappingFilter(
                self.selectedDataSource)
            self.dataSources.append(self.selectedDataSource)

        if 'Events' in self.selectedDataSource.resultsSource.h5f.root:
            self.events = self.selectedDataSource.resultsSource.h5f.root.Events[:]

            evKeyNames = set()
            for e in self.events:
                evKeyNames.add(e['EventName'])

            if 'ProtocolFocus' in evKeyNames:
                self.zm = piecewiseMapping.GeneratePMFromEventList(
                    self.events, self.mdh, self.mdh.getEntry('StartTime'),
                    self.mdh.getEntry('Protocol.PiezoStartPos'))
                self.z_focus = 1.e3 * self.zm(self.selectedDataSource['t'])
                #self.elv.SetCharts([('Focus [um]', self.zm, 'ProtocolFocus'),])

                self.selectedDataSource.z_focus = self.z_focus
                self.selectedDataSource.setMapping('focus', 'z_focus')

            if 'ScannerXPos' in evKeyNames:
                x0 = 0
                if 'Positioning.Stage_X' in self.mdh.getEntryNames():
                    x0 = self.mdh.getEntry('Positioning.Stage_X')
                self.xm = piecewiseMapping.GeneratePMFromEventList(
                    self.elv.eventSource, self.mdh,
                    self.mdh.getEntry('StartTime'), x0, 'ScannerXPos', 0)

                self.selectedDataSource.scan_x = 1.e3 * self.xm(
                    self.selectedDataSource['t'] - .01)
                self.selectedDataSource.setMapping('ScannerX', 'scan_x')
                self.selectedDataSource.setMapping('x', 'x + scan_x')

            if 'ScannerYPos' in evKeyNames:
                y0 = 0
                if 'Positioning.Stage_Y' in self.mdh.getEntryNames():
                    y0 = self.mdh.getEntry('Positioning.Stage_Y')
                self.ym = piecewiseMapping.GeneratePMFromEventList(
                    self.elv.eventSource, self.mdh,
                    self.mdh.getEntry('StartTime'), y0, 'ScannerYPos', 0)

                self.selectedDataSource.scan_y = 1.e3 * self.ym(
                    self.selectedDataSource['t'] - .01)
                self.selectedDataSource.setMapping('ScannerY', 'scan_y')
                self.selectedDataSource.setMapping('y', 'y + scan_y')

            if 'ScannerXPos' in evKeyNames or 'ScannerYPos' in evKeyNames:
                self.imageBounds = ImageBounds.estimateFromSource(
                    self.selectedDataSource)

        if not 'foreShort' in dir(self.selectedDataSource):
            self.selectedDataSource.foreShort = 1.

        if not 'focus' in self.selectedDataSource.mappings.keys():
            self.selectedDataSource.focus = np.zeros(
                self.selectedDataSource['x'].shape)

        if 'fitResults_z0' in self.selectedDataSource.keys():
            self.selectedDataSource.setMapping(
                'z', 'fitResults_z0 + foreShort*focus')
        else:
            self.selectedDataSource.setMapping('z', 'foreShort*focus')

        #if we've done a 3d fit
        #print self.selectedDataSource.keys()
        for k in list(self.filterKeys.keys()):
            if not k in self.selectedDataSource.keys():
                self.filterKeys.pop(k)

        #print self.filterKeys
        self.RegenFilter()

        if 'Sample.Labelling' in self.mdh.getEntryNames():
            self.SpecFromMetadata()
Example #16
    def OpenFile(self, filename='', ds=None, **kwargs):
        '''Open a file - accepts optional keyword arguments for use with files
        saved as .txt and .mat. These are:
            
            FieldNames: a list of names for the fields in the text file or
                        matlab variable.
            VarName:    the name of the variable in the .mat file which 
                        contains the data.
            SkipRows:   Number of header rows to skip for txt file data
            
            PixelSize:  Pixel size if not in nm
            
        '''

        #close any files we had open previously
        while len(self.filesToClose) > 0:
            self.filesToClose.pop().close()

        #clear our state
        self.dataSources = []
        if 'zm' in dir(self):
            del self.zm
        self.filter = None
        self.mapping = None
        self.colourFilter = None
        self.events = None
        self.mdh = MetaDataHandler.NestedClassMDHandler()

        self.filename = filename

        if not ds is None:
            self.selectedDataSource = ds
            self.dataSources.append(ds)
        elif os.path.splitext(filename)[1] == '.h5r':
            try:
                self.selectedDataSource = inpFilt.h5rSource(filename)
                self.dataSources.append(self.selectedDataSource)

                self.filesToClose.append(self.selectedDataSource.h5f)

                if 'DriftResults' in self.selectedDataSource.h5f.root:
                    self.dataSources.append(
                        inpFilt.h5rDSource(self.selectedDataSource.h5f))

                    if len(self.selectedDataSource['x']) == 0:
                        self.selectedDataSource = self.dataSources[-1]

            except:  #fallback to catch series that only have drift data
                self.selectedDataSource = inpFilt.h5rDSource(filename)
                self.dataSources.append(self.selectedDataSource)

                self.filesToClose.append(self.selectedDataSource.h5f)

            #catch really old files which don't have any metadata
            if 'MetaData' in self.selectedDataSource.h5f.root:
                self.mdh = MetaDataHandler.HDFMDHandler(
                    self.selectedDataSource.h5f)

            if ('Events' in self.selectedDataSource.h5f.root) and (
                    'StartTime' in self.mdh.keys()):
                self.events = self.selectedDataSource.h5f.root.Events[:]

        elif os.path.splitext(filename)[1] == '.mat':  #matlab file
            ds = inpFilt.matfileSource(filename, kwargs['FieldNames'],
                                       kwargs['VarName'])
            self.selectedDataSource = ds
            self.dataSources.append(ds)

        elif os.path.splitext(filename)[1] == '.csv':
            #special case for csv files - tell np.loadtxt to use a comma rather than whitespace as a delimiter
            if 'SkipRows' in kwargs.keys():
                ds = inpFilt.textfileSource(filename,
                                            kwargs['FieldNames'],
                                            delimiter=',',
                                            skiprows=kwargs['SkipRows'])
            else:
                ds = inpFilt.textfileSource(filename,
                                            kwargs['FieldNames'],
                                            delimiter=',')
            self.selectedDataSource = ds
            self.dataSources.append(ds)

        else:  #assume it's a tab (or other whitespace) delimited text file
            if 'SkipRows' in kwargs.keys():
                ds = inpFilt.textfileSource(filename,
                                            kwargs['FieldNames'],
                                            skiprows=kwargs['SkipRows'])
            else:
                ds = inpFilt.textfileSource(filename, kwargs['FieldNames'])
            self.selectedDataSource = ds
            self.dataSources.append(ds)

        #wrap the data source with a mapping so we can fiddle with things
        #e.g. combining z position and focus
        self.inputMapping = inpFilt.mappingFilter(self.selectedDataSource)
        self.selectedDataSource = self.inputMapping
        self.dataSources.append(self.inputMapping)

        if 'PixelSize' in kwargs.keys():
            self.selectedDataSource.pixelSize = kwargs['PixelSize']
            self.selectedDataSource.setMapping('x', 'x*pixelSize')
            self.selectedDataSource.setMapping('y', 'y*pixelSize')

        #Retrieve or estimate image bounds
        if 'Camera.ROIWidth' in self.mdh.getEntryNames():
            x0 = 0
            y0 = 0

            x1 = self.mdh.getEntry(
                'Camera.ROIWidth') * 1e3 * self.mdh.getEntry('voxelsize.x')
            y1 = self.mdh.getEntry(
                'Camera.ROIHeight') * 1e3 * self.mdh.getEntry('voxelsize.y')

            if 'Splitter' in self.mdh.getEntry('Analysis.FitModule'):
                if 'Splitter.Channel0ROI' in self.mdh.getEntryNames():
                    rx0, ry0, rw, rh = self.mdh['Splitter.Channel0ROI']
                    x1 = rw * 1e3 * self.mdh.getEntry('voxelsize.x')
                    y1 = rh * 1e3 * self.mdh.getEntry('voxelsize.y')
                else:
                    y1 = y1 / 2

            self.imageBounds = ImageBounds(x0, y0, x1, y1)
        else:
            self.imageBounds = ImageBounds.estimateFromSource(
                self.selectedDataSource)

        #extract information from any events
        self._processEvents()

        #handle special cases which get detected by looking for the presence or
        #absence of certain variables in the data.
        if 'fitResults_Ag' in self.selectedDataSource.keys():
            #if we used the splitter set up a number of mappings e.g. total amplitude and ratio
            self._processSplitter()

        if 'fitResults_ratio' in self.selectedDataSource.keys():
            #if we used the splitter set up a number of mappings e.g. total amplitude and ratio
            self._processPriSplit()

        if 'fitResults_sigxl' in self.selectedDataSource.keys():
            #fast, quickpalm like astigmatic fitting
            self.selectedDataSource.setMapping(
                'sig', 'fitResults_sigxl + fitResults_sigyu')
            self.selectedDataSource.setMapping(
                'sig_d', 'fitResults_sigxl - fitResults_sigyu')

            self.selectedDataSource.dsigd_dz = -30.
            self.selectedDataSource.setMapping('fitResults_z0',
                                               'dsigd_dz*sig_d')

        if not 'y' in self.selectedDataSource.keys():
            self.selectedDataSource.setMapping('y', '10*t')

        #set up correction for foreshortening and z focus stepping
        if not 'foreShort' in dir(self.selectedDataSource):
            self.selectedDataSource.foreShort = 1.

        if not 'focus' in self.selectedDataSource.mappings.keys():
            self.selectedDataSource.focus = np.zeros(
                self.selectedDataSource['x'].shape)

        if 'fitResults_z0' in self.selectedDataSource.keys():
            self.selectedDataSource.setMapping(
                'z', 'fitResults_z0 + foreShort*focus')
        elif not 'z' in self.selectedDataSource.keys():
            self.selectedDataSource.setMapping('z', 'foreShort*focus')

        #Fit module specific filter settings
        if 'Analysis.FitModule' in self.mdh.getEntryNames():
            fitModule = self.mdh['Analysis.FitModule']

            print('fitModule = %s' % fitModule)

            if 'Interp' in fitModule:
                self.filterKeys['A'] = (5, 100000)

            if 'LatGaussFitFR' in fitModule:
                self.selectedDataSource.nPhot = getPhotonNums(
                    self.selectedDataSource, self.mdh)
                self.selectedDataSource.setMapping('nPhotons', 'nPhot')

            if fitModule == 'SplitterShiftEstFR':
                self.filterKeys['fitError_dx'] = (0, 10)
                self.filterKeys['fitError_dy'] = (0, 10)

        #remove any keys from the filter which are not present in the data
        for k in list(self.filterKeys.keys()):
            if not k in self.selectedDataSource.keys():
                self.filterKeys.pop(k)

        self.Rebuild()

        if 'Sample.Labelling' in self.mdh.getEntryNames(
        ) and 'gFrac' in self.selectedDataSource.keys():
            self.SpecFromMetadata()
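A minimal usage sketch for the keyword arguments described in the docstring of this OpenFile method; the calling object `pipeline`, the file names and the field names are hypothetical.

# Hypothetical calls illustrating the documented keyword arguments.
pipeline.OpenFile('localisations.csv',
                  FieldNames=['x', 'y', 't', 'A'],  # column names in the text file
                  SkipRows=1,                       # skip a single header row
                  PixelSize=106.0)                  # pixel size in nm; x and y are rescaled accordingly

pipeline.OpenFile('localisations.mat',
                  FieldNames=['x', 'y', 't'],
                  VarName='results')                # name of the variable inside the .mat file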