Code example #1
File: record_thread.py  Project: outofculture/acq4
    def writeFrames(self, frames, dh):
        newRec = self.currentStack is None

        if newRec:
            self.startFrameTime = frames[0][1]['time']

        times = [f[1]['time'] for f in frames]
        translations = np.array(
            [f[1]['transform'].getTranslation() for f in frames])
        arrayInfo = [
            {'name': 'Time', 'values': array(times) - self.startFrameTime, 'units': 's', 'translation': translations},
            {'name': 'X'},
            {'name': 'Y'}
        ]
        imgs = [f[0][np.newaxis, ...] for f in frames]

        data = MetaArray(np.concatenate(imgs, axis=0), info=arrayInfo)
        if newRec:
            self.currentStack = dh.writeFile(data,
                                             'video',
                                             autoIncrement=True,
                                             info=frames[0][1],
                                             appendAxis='Time',
                                             appendKeys=['translation'])
        else:
            data.write(self.currentStack.name(),
                       appendAxis='Time',
                       appendKeys=['translation'])
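
Examples #1, #7, and #8 all follow the same pattern: build each chunk as a MetaArray whose first axis is appendable ('Time' or 'Depth'), write the first chunk to a new file, then extend that file along the same axis. A minimal standalone sketch of the pattern, assuming the MetaArray class bundled with pyqtgraph (acq4 ships an equivalent) and its HDF5-backed appendAxis option; the helper name and file path are illustrative only:

import numpy as np
from pyqtgraph.metaarray import MetaArray  ## acq4 bundles an equivalent MetaArray

def appendFrames(path, frames, times):
    ## frames: list of 2D image arrays; times: matching timestamps in seconds
    info = [
        {'name': 'Time', 'units': 's', 'values': np.asarray(times, dtype=float)},
        {'name': 'X'},
        {'name': 'Y'},
    ]
    chunk = MetaArray(np.stack(frames), info=info)
    ## first call creates the file; later calls extend it along the 'Time' axis
    ## (appendAxis is only handled by the HDF5-based .ma writer)
    chunk.write(path, appendAxis='Time')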
Code example #2
 def saveMA(self, fileName=None):
     if self.imgData is None:
         raise HelpfulException("There is no processed data to save.")
     if fileName is None:
         dh = self.getElement("File Loader").baseDir().name()
         self.fileDialog = FileDialog(None, "Save image data", dh, '*.ma')
         self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
         self.fileDialog.show()
         self.fileDialog.fileSelected.connect(self.saveMA)
         return  
     
     table = self.dbquery.table()
     x = table['xPos'].min()
     y = table['yPos'].min()        
     
     #print "params:", self.imgData.dtype.names
     #print "shape:", self.imgData.shape
     #arr = MetaArray(self.currentData) ### need to format this with axes and info
     arr = MetaArray([self.imgData[p] for p in self.imgData.dtype.names], info=[
         {'name':'vals', 'cols':[{'name':p} for p in self.imgData.dtype.names]},
         {'name':'xPos', 'units':'m', 'values':np.arange(self.imgData.shape[0])*self.spacing+x},
         {'name':'yPos', 'units':'m', 'values':np.arange(self.imgData.shape[1])*self.spacing+y},
         
         {'spacing':self.spacing}
     ]) 
     
     arr.write(fileName)    
Code example #3
    def saveMA(self, fileName=None):
        if fileName is None:
            dh = self.getElement("File Loader").baseDir().name()
            self.fileDialog = FileDialog(None, "Save traces", dh, '*.ma')
            self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
            self.fileDialog.show()
            self.fileDialog.fileSelected.connect(self.saveMA)
            return

        #arr = MetaArray(self.currentData) ### need to format this with axes and info
        arr = MetaArray(
            [self.currentData['Rs'], self.currentData['Rm'], self.currentData['Ih']],
            info=[
                {'name': 'vals', 'cols': [
                    {'name': 'Rs', 'units': 'Ohms'},
                    {'name': 'Rm', 'units': 'Ohms'},
                    {'name': 'Ih', 'units': 'A'}]},
                {'name': 'time', 'units': 's', 'values': self.currentData['time']}
            ])

        arr.write(fileName)
Code example #4
File: MetaArray.py  Project: ablot/acq4
 def write(cls, data, dirHandle, fileName, **args):
     """Write data to fileName.
     Return the file name written (this allows the function to modify the requested file name)
     """
     ext = cls.extensions[0]
     if fileName[-len(ext):] != ext:
         fileName = fileName + ext
         
     if not isinstance(data, MA):
         data = MA(data)
     data.write(os.path.join(dirHandle.name(), fileName), **args)
     return fileName
Code example #5
File: MetaArray.py  Project: outofculture/acq4
    def write(cls, data, dirHandle, fileName, **args):
        """Write data to fileName.
        Return the file name written (this allows the function to modify the requested file name)
        """
        ext = cls.extensions[0]
        if fileName[-len(ext):] != ext:
            fileName = fileName + ext

        if not isinstance(data, MA):
            data = MA(data)
        data.write(os.path.join(dirHandle.name(), fileName), **args)
        return fileName
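
The two write() wrappers above (examples #4 and #5) are acq4 file-type hooks: they normalize the .ma extension, coerce plain arrays into MetaArray, and delegate to MetaArray.write() inside the given DirHandle. A rough standalone equivalent, with the DirHandle replaced by a plain directory path (an assumption made purely for illustration):

import os
from pyqtgraph.metaarray import MetaArray  ## acq4 bundles an equivalent MetaArray

def writeMA(data, dirPath, fileName, **args):
    ## mirror the wrapper above: ensure the .ma extension, wrap bare arrays
    if not fileName.endswith('.ma'):
        fileName = fileName + '.ma'
    if not isinstance(data, MetaArray):
        data = MetaArray(data)
    data.write(os.path.join(dirPath, fileName), **args)
    return fileName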
Code example #6
File: CellHealthTracker.py  Project: ablot/acq4
 def saveMA(self, fileName=None):
     if fileName is None:
         dh = self.getElement("File Loader").baseDir().name()
         self.fileDialog = FileDialog(None, "Save traces", dh, '*.ma')
         self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
         self.fileDialog.show()
         self.fileDialog.fileSelected.connect(self.saveMA)
         return  
     
     #arr = MetaArray(self.currentData) ### need to format this with axes and info
     arr = MetaArray([self.currentData['Rs'], self.currentData['Rm'], self.currentData['Ih']], info=[
         {'name':'vals', 'cols':[
             {'name':'Rs', 'units':'Ohms'},
             {'name':'Rm', 'units':'Ohms'},
             {'name':'Ih', 'units':'A'}]},
         {'name':'time', 'units':'s', 'values':self.currentData['time']}]) 
     
     arr.write(fileName)
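
Examples #3 and #6 store Rs, Rm, and Ih as named columns on a 'vals' axis, which is what makes the saved file self-describing when it is loaded again. A hedged read-back sketch, assuming MetaArray's named-axis indexing and an illustrative file name:

from pyqtgraph.metaarray import MetaArray

arr = MetaArray(file='cell_health.ma')   ## hypothetical file produced by saveMA above
rs = arr['vals':'Rs']                    ## select the 'Rs' column by name
t = arr.xvals('time')                    ## values stored on the 'time' axis
print(rs.shape, t.shape)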
Code example #7
    def recordFrame(self, frame, iter, depthIndex):
        # Handle new frame
        dh = self.prot['storageDir']
        name = 'image_%03d' % iter

        if self.prot['zStack']:
            # start or append focus stack
            arrayInfo = [
                {'name': 'Depth', 'values': [self.prot['zStackValues'][depthIndex]]},
                {'name': 'X'},
                {'name': 'Y'}
            ]
            data = MetaArray(frame.getImage()[np.newaxis, ...], info=arrayInfo)
            if depthIndex == 0:
                self.currentDepthStack = dh.writeFile(data, name, info=frame.info(), appendAxis='Depth')
            else:
                data.write(self.currentDepthStack.name(), appendAxis='Depth')

        else:
            # record single-frame image
            arrayInfo = [
                {'name': 'X'},
                {'name': 'Y'}
            ]
            data = MetaArray(frame.getImage(), info=arrayInfo)
            dh.writeFile(data, name, info=frame.info())
Code example #8
File: RecordThread.py  Project: ablot/acq4
 def writeFrames(self, frames, newRec):
     times = [f[1]['time'] for f in frames]
     arrayInfo = [
         {'name': 'Time', 'values': array(times) - self.startFrameTime, 'units': 's'},
         {'name': 'X'},
         {'name': 'Y'}
     ]
     #import random
     #if random.random() < 0.01:
         #raise Exception("TEST")
     imgs = [f[0][np.newaxis,...] for f in frames]
     
     data = MetaArray(np.concatenate(imgs, axis=0), info=arrayInfo)
     if newRec:
         self.currentRecord = self.m.getCurrentDir().writeFile(data, 'video', autoIncrement=True, info=frames[0][1], appendAxis='Time')
         self.currentFrameNum = 0
     else:
         data.write(self.currentRecord.name(), appendAxis='Time')
         s = 1.0/self.currentFrameNum
         
     #self.showMessage("Recording %s - %d" % (self.currentRecord.name(), self.currentFrameNum))
     
     self.currentFrameNum += len(frames)
Code example #9
    def getResult(self):
        ## Access data recorded from DAQ task
        ## create MetaArray and fill with MC state info
        #self.state['startTime'] = self.daqTasks[self.daqTasks.keys()[0]].getStartTime()
        with self.dev.lock:
            channels = self.getUsedChannels()
            #print channels
            result = {}
            #result['info'] = self.state
            for ch in channels:
                chConf = self.dev.config[ch + 'Channel']
                result[ch] = self.daqTasks[ch].getData(chConf['channel'])
                # print result[ch]
                nPts = result[ch]['info']['numPts']
                rate = result[ch]['info']['rate']
                if ch == 'command':
                    #result[ch]['data'] = result[ch]['data'] / self.dev.config['cmdScale'][self.cmd['mode']]
                    result[ch]['data'] = result[ch]['data'] * self.state['extCmdScale']
                    result[ch]['name'] = 'command'
                    if self.cmd['mode'] == 'VC':
                        result[ch]['units'] = 'V'
                    else:
                        result[ch]['units'] = 'A'
                else:
                    #scale = 1.0 / self.state[ch + 'Signal'][1]
                    scale = self.state[ch + 'ScaleFactor']
                    result[ch]['data'] = result[ch]['data'] * scale
                    #result[ch]['units'] = self.state[ch + 'Signal'][2]
                    result[ch]['units'] = self.state[ch + 'Units']
                    result[ch]['name'] = ch
            # print result

            if len(result) == 0:
                return None

            ## Copy state from first channel (assume this is the same for all channels)
            #firstChInfo = result[channels[0]]['info']
            #for k in firstChInfo:
            #self.state[k] = firstChInfo[k]
            daqState = {}
            for ch in result:
                daqState[ch] = result[ch]['info']

            ## record command holding value
            if 'command' not in daqState:
                daqState['command'] = {}
            daqState['command']['holding'] = self.holdingVal

            #timeVals = linspace(0, float(self.state['numPts']-1) / float(self.state['rate']), self.state['numPts'])
            timeVals = linspace(0, float(nPts - 1) / float(rate), nPts)
            chanList = [atleast_2d(result[x]['data']) for x in result]
            # for l in chanList:
            # print l.shape
            cols = [(result[x]['name'], result[x]['units']) for x in result]
            # print cols
            #print [a.shape for a in chanList]
            try:
                arr = concatenate(chanList)
            except:
                for a in chanList:
                    print a.shape
                raise

            info = [
                axis(name='Channel', cols=cols),
                axis(name='Time', units='s', values=timeVals)
            ] + [{
                'ClampState': self.state,
                'DAQ': daqState
            }]

            taskInfo = self.cmd.copy()
            if 'command' in taskInfo:
                del taskInfo['command']
            info[-1]['Protocol'] = taskInfo
            info[-1]['startTime'] = result[result.keys()[0]]['info']['startTime']

            marr = MetaArray(arr, info=info)

            return marr
Code example #10
    def updateProfiles(self):
        #if not self.analyzeBtn.isChecked():
        #return
        plots = self.getElement('profiles'), self.getElement('profile fits')
        for plot in plots:
            plot.clear()
            plot.setLabel('bottom', 'distance', units='m')
        width, height = self.normData.shape
        xVals = np.linspace(0, self.px[0] * width, width)
        fits = []

        def slopeGaussian(v, x):  ## gaussian + slope
            return fn.gaussian(v[:4], x) + v[4] * x

        def gaussError(v, x, y):  ## center-weighted error function for sloped gaussian
            err = abs(y - slopeGaussian(v, x))
            v2 = [2.0, v[1], v[2] * 0.3, 1.0, 0.0]
            return err * slopeGaussian(v2, x)

        with pg.ProgressDialog("Processing..", 0, height - 1,
                               cancelText=None) as dlg:
            for i in range(height):
                row = self.normData[:, i]
                guess = [row.max() - row.min(), xVals[int(width / 2)], self.px[0] * 3, row.max(), 0.0]
                #fit = fn.fitGaussian(xVals=xVals, yVals=row, guess=guess)[0]
                #fit = fn.fit(slopeGaussian, xVals=xVals, yVals=row, guess=guess)[0]
                fit = scipy.optimize.leastsq(gaussError, guess, args=(xVals, row))[0]
                fit[2] = abs(fit[2])
                dist = fit[1] / (self.px[0] * width / 2.)
                #print fit, dist
                ## sanity check on fit: reject if the center is far off-image or the
                ## fitted baseline is not within 0.5-2x of the row median
                if abs(dist - 1) > 0.5 or not (0.5 < fit[3] / np.median(row) < 2.0):
                    #print "rejected:", fit, fit[3]/np.median(row), self.px[0]*width/2.
                    #fit = guess[:]
                    #fit[0] = 0
                    fit = [0, 0, 0, 0, 0]
                else:
                    # round 2: eliminate anomalous points and re-fit
                    fitCurve = slopeGaussian(fit, xVals)
                    diff = row - fitCurve
                    std = diff.std()
                    mask = abs(diff) < std * 1.5
                    x2 = xVals[mask]
                    y2 = row[mask]
                    fit = fn.fit(slopeGaussian, xVals=x2, yVals=y2,
                                 guess=fit)[0]
                fits.append(fit)
                dlg += 1
                if dlg.wasCanceled():
                    raise Exception("Processing canceled by user")

        for i in range(len(fits)):  ## plot in reverse order
            pen = pg.intColor(height - i, height * 1.4)
            plots[0].plot(y=self.normData[:, -1 - i], x=xVals, pen=pen)
            plots[1].plot(y=slopeGaussian(fits[-1 - i], xVals),
                          x=xVals,
                          pen=pen)

        yVals = np.linspace(0, self.px[0] * height, height)
        arr = np.array(fits)
        info = [
            {'name': 'depth', 'units': 'm', 'values': yVals},
            {'name': 'fitParams', 'cols': [
                {'name': 'Amplitude'},
                {'name': 'X Offset'},
                {'name': 'Sigma', 'units': 'm'},
                {'name': 'Y Offset'},
                {'name': 'Slope'},
            ]},
            {
                'sourceImage': self.fileHandle.name(),
                'dataRegion': self.dataRgn.saveState(),
                'backgroundRegion': self.bgRgn.saveState(),
                'description': """
                    The source image was normalized for background fluorescence, then each row was fit to a sloped gaussian function:
                        v[0] * np.exp(-((x-v[1])**2) / (2 * v[2]**2)) + v[3] + v[4] * x
                    The fit parameters v[0..4] for each image row are stored in the columns of this data set.
                    """
            }
        ]
        #print info
        self.data = MetaArray(arr, info=info)
        self.showResults(self.data)
Code example #11
    def getResult(self):
        ## Access data recorded from DAQ task
        ## create MetaArray and fill with MC state info

        ## Collect data and info for each channel in the command
        result = {}
        for ch in self.bufferedChannels:
            result[ch] = self.daqTasks[ch].getData(self.dev._DGConfig[ch]['channel'])
            result[ch]['data'] = self.mapping.mapFromDaq(ch, result[ch]['data'])  ## scale/offset/invert
            result[ch]['units'] = self.getChanUnits(ch)

        if len(result) > 0:
            meta = result[list(result.keys())[0]]['info']
            rate = meta['rate']
            nPts = meta['numPts']
            ## Create an array of time values
            timeVals = np.linspace(0, float(nPts - 1) / float(rate), nPts)

            ## Concatenate all channels together into a single array, generate MetaArray info
            chanList = [np.atleast_2d(result[x]['data']) for x in result]
            cols = [(x, result[x]['units']) for x in result]
            # print cols
            try:
                arr = np.concatenate(chanList)
            except:
                print(chanList)
                print([a.shape for a in chanList])
                raise

            daqState = OrderedDict()
            for ch in self.dev._DGConfig:
                if ch in result:
                    daqState[ch] = result[ch]['info']
                else:
                    daqState[ch] = {}

                ## record current holding value for all output channels (even those that were not buffered for this task)
                if self.dev._DGConfig[ch]['type'] in ['ao', 'do']:

                    daqState[ch]['holding'] = self.holdingVals[ch]

            info = [
                axis(name='Channel', cols=cols),
                axis(name='Time', units='s', values=timeVals)
            ] + [{
                'DAQ': daqState
            }]

            protInfo = self._DAQCmd.copy()  ## copy everything but the command arrays and low-level configuration info
            for ch in protInfo:
                protInfo[ch].pop('command', None)
                protInfo[ch].pop('lowLevelConf', None)
            info[-1]['Protocol'] = protInfo

            marr = MetaArray(arr, info=info)
            return marr

        else:
            return None
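
Both DAQ examples (#9 and #11) return a 2D MetaArray with a 'Channel' axis of named columns and a 'Time' axis carrying explicit sample times, so downstream code can pull out a channel without knowing its row index. A rough usage sketch, assuming MetaArray's named-axis indexing; marr stands for the value returned by getResult() and 'primary' is an assumed channel name:

trace = marr['Channel':'primary'].asarray()  ## 'primary' is an assumed channel name
t = marr.xvals('Time')                       ## per-sample times in seconds
baseline = trace[t < 0.01].mean()            ## e.g. average over the first 10 ms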
Code example #12
def parseNii(headerFH, imgFile):
    m = Obj()
    
    ## see nifti1.h
    header = headerFH.read(348)
    if len(header) != 348:
        raise Exception("Header is wrong size! (expected 348, got %d" % len(header))
    
    order = getByteOrder(header[:4])
    
    ## break up into substructs
    header_key = struct.unpack(order+'i10s18sihcc', header[:40])
    image_dim = struct.unpack(order+'8h3f4h11fhcB4f2i', header[40:148])
    data_history = struct.unpack(order+'80s24s2h18f16s4s', header[148:348])
    
    extension = headerFH.read(4)
    if len(extension) < 4:
        extension = '\0\0\0\0'
    
    ## pull variables from substructs
    dim_info = header_key[-1]  ## all others are unused in NiFTI
    
    m.dim = image_dim[:8]
    m.intent = image_dim[8:11]
    m.intent_code, m.datatype, m.bitpix, m.slice_start = image_dim[11:15]
    m.pixdim = image_dim[15:23]
    m.qfac = m.pixdim[0]
    if m.qfac == 0.0:
        m.qfac = 1.0
    m.vox_offset, m.scl_slope, m.scl_inter, m.slice_end, m.slice_code = image_dim[23:28]
    m.xyzt_units, m.cal_max, m.cal_min, m.slice_duration, m.toffset, m.glmax, m.glmin = image_dim[28:35]
    
    m.descrip, m.aux_file, m.qform_code, m.sform_code = data_history[:4]
    m.quatern = data_history[4:7]
    m.qoffset = data_history[7:10]
    m.srow_x = data_history[10:14]
    m.srow_y = data_history[14:18]
    m.srow_z = data_history[18:22]
    m.intent_name, m.magic = data_history[22:]
    
    try:
        m.descrip = m.descrip[:m.descrip.index('\0')]
    except ValueError:
        pass
    
    #print "Description:", descrip[:descrip.index('\0')]

    ## sanity checks
    if m.magic not in ['nii\0', 'n+1\0']:
        raise Exception('Unsupported NiFTI version: "%s"' % m.magic)
    if m.dim[0] > 7:
        raise Exception('Dim > 7 not supported. (got %d)' % m.dim[0])
    m.vox_offset = int(m.vox_offset)


    ## read extended data (nothing done here yet, we just let the user know that the data is there.)
    if extension[0] != '\0':
        ext = headerFH.read(8)
        esize, ecode = struct.unpack('2i', ext)
        #edata = headerFH.read(esize-8)
        
        if ecode == 2:
            print "Header has extended data in DICOM format (ignored)"
        elif ecode == 4: 
            print "Header has extended data in AFNI format (ignored)"
        else:
            print "Header has extended data in unknown format (code=%d; ignored)" % ecode
    
    ## do a little parsing
    shape = m.dim[1:m.dim[0]+1]
    size = (m.bitpix / 8) * reduce(lambda a,b: a*b, shape)
    dtype = niiDataTypes[m.datatype]
    if isinstance(dtype, basestring):
        raise Exception("Data type not supported: %s"% dtype)
    #print "Dimensions:", dim[0]
    #print "Data shape:", shape
    #print "Data type: %s  (%dbpp)" % (str(dtype), bitpix)
    
    ## read image data. Anything more than 200MB, use memmap.
    if size < 200e6:
        if m.magic == 'n+1\0':  ## data is in the same file as the header
            m.vox_offset = max(352, m.vox_offset)
            headerFH.seek(int(m.vox_offset))
            data = headerFH.read(size)
        elif m.magic == 'nii\0':               ## data is in a separate .img file
            if imgFile is None:
                imgFile = os.path.splitext(headerFH.name)[0] + '.img'
            fh = open(imgFile, 'rb')
            fh.seek(m.vox_offset)
            data = fh.read(size)
        headerFH.close()
        
        if len(data) != size:
            raise Exception("Data size is incorrect. Expected %d, got %d" % (size, len(data)))
            
        data = np.fromstring(data, dtype=dtype)
        data.shape = m.dim[1:m.dim[0]+1]
    else:
        #print "Large file; loading by memmap"
        if m.magic == 'n+1\0':  ## data is in the same file as the header
            m.vox_offset = max(352, m.vox_offset)
            fh = headerFH
        elif m.magic == 'nii\0':               ## data is in a separate .img file
            if imgFile is None:
                imgFile = os.path.splitext(headerFH.name)[0] + '.img'
            fh = open(imgFile, 'rb')
            headerFH.close()
        data = np.memmap(fh, offset=m.vox_offset, dtype=dtype, shape=shape, mode='r')
    
    ## apply scaling
    if m.scl_slope == 0.0:
        m.scl_slope = 1.0
    
    if (m.scl_slope != 1.0 or m.scl_inter != 0.0) and m.datatype != 128: ## scaling not allowed for RGB24
        #print "Applying scale and offset"
        data = (data.astype(np.float32) * m.scl_slope) + m.scl_inter

    m.xUnits = units[m.xyzt_units & 0x07]
    m.tUnits = units[m.xyzt_units & 0x38]

    info = []
    for x in shape:
        info.append({})
    
    
    ## determine coordinate system
    if m.qform_code > 0:  ## coordinate method 2  (currently rotation matrix is not implemented, only offset.)
        m.xVals = []
        for i in range(min(3, m.dim[0])):
            offset = m.qoffset[i] * m.xUnits[1]
            width = (m.pixdim[i+1] * m.xUnits[1] * (m.dim[i+1]-1))
            info[i]['values'] = np.linspace(offset, offset + width, m.dim[i+1])
    
    if m.sform_code > 0:  ## coordinate method 3
        print "Warning: This data (%s) has an unsupported affine transform." % headerFH.name
        #print "affine transform:"
        #print srow_x
        #print srow_y
        #print srow_z
    else:  ## coordinate method 1
        m.pixdim = list(m.pixdim)
        m.pixdim[1] *= m.xUnits[1]
        m.pixdim[2] *= m.xUnits[1]
        m.pixdim[3] *= m.xUnits[1]
        m.pixdim[4] *= m.tUnits[1]
        #print "Voxel dimensions:", pixdim
        ## try just using pixdim
    
    #print "Voxel units:", xUnits[0]
    
    ## In NiFTI, dimensions MUST represent (x, y, z, t, v, ...)
    ## x = right, y = anterior, z = superior
    names = ['right', 'anterior', 'dorsal', 'time']
    for i in range(min(4, m.dim[0])):
        info[i]['name'] = names[i]
        
    
    ## pixdim[1:] specifies voxel length along each axis
    ## intent_code
    ## orientation
    ## bitpix
    ## cal_min, cal_max are calibrated display black and white levels
    
    info.append(m.__dict__)
    
    #print "dims:", m.dim
    #print info
    data = MetaArray(data, info=info)
    return data