Example #1
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        else:
            # 'darwin' (and anything else) is unsupported: no Neuroshare DLL
            raise NotImplementedError('Neuroshare DLLs are not available '
                                      'for platform %s' % sys.platform)
        neuroshare = DllWithError(neuroshare)
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename.encode('utf-8')), ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.events.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                      sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                                      t_start=pdTime.value * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    sptr = SpikeTrain([] * pq.s, name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times=pq.Quantity(times, units='s', copy=False),
                                      t_stop=times.max(),
                                      waveforms=pq.Quantity(waveforms,
                                                            units=str(pdwSegmentInfo.szUnits),
                                                            copy=False),
                                      left_sweep=nsample / 2. / float(pdwSegmentInfo.dSampleRate) * pq.s,
                                      sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                                      name=str(entityInfo.szEntityLabel),
                                      )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
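A minimal usage sketch for this reader, assuming it is exposed as neo's NeuroshareIO and that the constructor accepts filename and dllname; the file and DLL paths below are placeholders:

    from neo.io import NeuroshareIO

    # Hypothetical paths: a Neuroshare-compatible recording and its vendor DLL.
    io = NeuroshareIO(filename='data.mcd', dllname='nsMCDLibrary.so')
    seg = io.read_segment(import_neuroshare_segment=True, lazy=False)
    print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))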
Example #2
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        else:
            # 'darwin' (and anything else) is unsupported: no Neuroshare DLL
            raise NotImplementedError('Neuroshare DLLs are not available '
                                      'for platform %s' % sys.platform)
        neuroshare = DllWithError(neuroshare)

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename.encode('utf-8')),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  #TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.events.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            #segment
            if (entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT'
                    and import_neuroshare_segment):

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :, :] = pData[:nsample * nsource].reshape(
                            nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. / float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
Example #3
    def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                        lazy=True):
        # returns a SpikeTrain or an Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        # # Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit = 0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.frombuffer(
                    fid.read(blockHeader.items * dt.itemsize), dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to neo standard classes: events or spiketrains
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # the Spike2 marker is closer to neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.frombuffer(alltrigs['adc'].tobytes(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.frombuffer(alltrigs['real'].tobytes(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time *
                                       header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    # unknown unit string: fall back to dimensionless
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                      waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
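A standalone sketch of the structured-dtype trick used above: each on-disk record is parsed in one shot with numpy, and a fixed-width byte field is then reinterpreted as waveform samples. The record layout and values are made up for illustration (little-endian byte order assumed):

    import numpy as np

    # Illustrative 'AdcMark'-style layout: timestamp, marker, 4-byte payload.
    fmt = [('tick', 'i4'), ('marker', 'i4'), ('adc', 'S4')]
    dt = np.dtype(fmt)

    # Fake two records "on disk", then parse them back in one shot.
    raw = np.array([(10, 1, b'\x01\x00\x02\x00'),
                    (20, 1, b'\x03\x00\x04\x00')], dtype=dt).tobytes()
    trigs = np.frombuffer(raw, dtype=dt)

    # Reinterpret the fixed-width byte field as int16 waveform samples.
    waveforms = np.frombuffer(trigs['adc'].tobytes(), dtype='i2')
    print(trigs['tick'])  # [10 20]
    print(waveforms)      # [1 2 3 4]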
Example #4
    def read_segment(self,
                     # the first 2 keyword arguments are imposed by the neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     segment_duration = 15.,
                     num_analogsignal = 4,
                     num_spiketrain_by_channel = 3,
                    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just a example to be adapted to each ClassIO.
        In this case these 3 paramters are  taken in account because this function
        return a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """

        sampling_rate = 10000. #Hz
        t_start = -1.


        #time vector for generated signal
        timevect = np.arange(t_start, t_start+ segment_duration , 1./sampling_rate)

        # create an empty segment
        seg = Segment( name = 'it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                            channel_index = i ,segment_duration = segment_duration, t_start = t_start)
                seg.analogsignals += [ ana ]

            # read nested spiketrain
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(lazy = lazy , cascade = cascade ,
                                                            segment_duration = segment_duration, t_start = t_start , channel_index = i)
                    seg.spiketrains += [ sptr ]


            # create an Event that mimics triggers.
            # note that ExampleIO does not allow direct access to Event objects;
            # for that you need read_segment(cascade=True)

            if lazy:
                # in the lazy case no data are read, so eva is empty
                eva = Event()
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva = Event(timevect[(np.random.rand(n) * timevect.size).astype('i')] * pq.s)
                # all durations are the same
                # (Event doesn't have durations; is Epoch intended here?)
                eva.durations = np.ones(n) * 500 * pq.ms
                # label
                l = [ ]
                for i in range(n):
                    if np.random.rand()>.6: l.append( 'TriggerA' )
                    else : l.append( 'TriggerB' )
                eva.labels = np.array( l )

            seg.events += [ eva ]

        seg.create_many_to_one_relationship()
        return seg
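A minimal usage sketch, assuming this method belongs to neo's ExampleIO; the filename is ignored by this fake IO, so any string works:

    from neo.io import ExampleIO

    io = ExampleIO(filename='fake.nof')
    seg = io.read_segment(segment_duration=5., num_analogsignal=2,
                          num_spiketrain_by_channel=1)
    print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))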
Example #5
    def read_one_channel_event_or_spike(self,
                                        fid,
                                        channel_num,
                                        header,
                                        lazy=True):
        # returns a SpikeTrain or an Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        # # Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid,
                                           np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.frombuffer(fid.read(blockHeader.items *
                                               dt.itemsize),
                                      dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to neo standard classes: events or spiketrains
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # the Spike2 marker is closer to neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.frombuffer(alltrigs['adc'].tobytes(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.frombuffer(alltrigs['real'].tobytes(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time * header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    # unknown unit string: fall back to dimensionless
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) *
                                      pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(
                        alltimes[alltrigs['marker'] == i],
                        waveforms=waveforms[alltrigs['marker'] == i] * unit,
                        sampling_rate=(1. / sample_interval) * pq.Hz,
                        t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
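A standalone sketch of the per-unit split at the end of this function: spikes are grouped by the low byte of their marker code. The marker values are made up and kept below 256 so that comparing against the full value matches the masked value, as the code above assumes:

    import numpy as np

    markers = np.array([1, 2, 1, 3, 2])
    for unit in set(markers & 255):
        spikes_of_unit = np.nonzero(markers == unit)[0]
        print(unit, spikes_of_unit)  # unit id and indices of its spikes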
Example #6
    def read_segment(
        self,
        # the first 2 keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        # all following arguments are decided by this IO and are free
        segment_duration=15.,
        num_analogsignal=4,
        num_spiketrain_by_channel=3,
    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just a example to be adapted to each ClassIO.
        In this case these 3 paramters are  taken in account because this function
        return a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """

        sampling_rate = 10000.  #Hz
        t_start = -1.

        #time vector for generated signal
        timevect = np.arange(t_start, t_start + segment_duration,
                             1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy,
                                             cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrain
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(
                        lazy=lazy,
                        cascade=cascade,
                        segment_duration=segment_duration,
                        t_start=t_start,
                        channel_index=i)
                    seg.spiketrains += [sptr]

            # create an Event that mimics triggers.
            # note that ExampleIO does not allow direct access to Event objects;
            # for that you need read_segment(cascade=True)

            if lazy:
                # in the lazy case no data are read, so eva is empty
                eva = Event()
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva = Event(
                    timevect[(np.random.rand(n) * timevect.size).astype('i')] *
                    pq.s)
                # all durations are the same
                # (Event doesn't have durations; is Epoch intended here?)
                eva.durations = np.ones(n) * 500 * pq.ms
                # label
                l = []
                for i in range(n):
                    if np.random.rand() > .6: l.append('TriggerA')
                    else: l.append('TriggerB')
                eva.labels = np.array(l)

            seg.events += [eva]

        seg.create_many_to_one_relationship()
        return seg
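A reduced standalone sketch of the fake-trigger generation above, with made-up sizes; it draws n random event times from the signal's time base and assigns random labels:

    import numpy as np
    import quantities as pq

    sampling_rate = 1000.  # Hz
    t_start = -1.
    timevect = np.arange(t_start, t_start + 4., 1. / sampling_rate)

    # n random trigger times drawn from the time vector, in seconds
    n = 5
    times = timevect[(np.random.rand(n) * timevect.size).astype('i')] * pq.s
    labels = np.where(np.random.rand(n) > .6, 'TriggerA', 'TriggerB')
    print(times)
    print(labels)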