Example 1
    def readOneChannelEventOrSpike(self, fid, channel_num, header, lazy=True):
        # return SpikeTrain or EventArray
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0: return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]: return

        ## Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = EventArray()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return [ea]

            elif channelHeader.kind in [6, 7]:
                sptr = SpikeTrain(
                    [] * pq.s,
                    t_stop=1e99)  # correct value for t_stop to be put in later
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                sptr.lazy_shape = totalitems
                return [sptr]

        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid,
                                           np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring(fid.read(blockHeader.items *
                                               dt.itemsize),
                                      dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 3 convert in neo standard class : eventarrays or spiketrains
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = EventArray()
                ea.annotate(channel_index=channel_num)
                ea.times = alltimes
                if channelHeader.kind >= 5:
                    # Spike2 marker is closer to the label sense of neo
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return [ea]

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4'
                    ) * channelHeader.scale / 6553.6 + channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time * header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    t_stop = alltimes.max(
                    )  # can get better value from associated AnalogSignal(s) ?
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) *
                                      pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(
                        alltimes[alltrigs['marker'] == i],
                        waveforms=waveforms[alltrigs['marker'] == i] * unit,
                        sampling_rate=(1. / sample_interval) * pq.Hz,
                        t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
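
The method above makes two passes over a linked list of on-disk blocks: one pass follows the succ_block pointers to count items, a second pass fills a preallocated record array. A minimal, self-contained sketch of that pattern follows; the block layout here (an item count, a next-block offset, then packed records) is hypothetical, standing in for the real Spike2 layout described by blockHeaderDesciption.

import struct

import numpy as np

# hypothetical record layout, mirroring the 'Marker data' case above
record_dt = np.dtype([('tick', 'i4'), ('marker', 'i4')])


def read_linked_blocks(fid, first_block):
    # Pass 1: follow the chain of succ_block offsets and count items.
    total, offset = 0, first_block
    while offset > 0:
        fid.seek(offset)
        items, succ = struct.unpack('<iq', fid.read(12))
        total += items
        offset = succ
    # Pass 2: allocate once, then copy each block's records in place.
    out = np.zeros(total, dtype=record_dt)
    pos, offset = 0, first_block
    while offset > 0:
        fid.seek(offset)
        items, succ = struct.unpack('<iq', fid.read(12))
        recs = np.frombuffer(fid.read(items * record_dt.itemsize),
                             dtype=record_dt)
        out[pos:pos + items] = recs
        pos += items
        offset = succ
    return out
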
Example 2
    def read_segment(
        self,
        # the first 2 keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        # all following arguments are decided by this IO and are free
        segment_duration=15.,
        num_analogsignal=4,
        num_spiketrain_by_channel=3,
    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        By default this IO reads a Segment.

        This is just an example to be adapted to each ClassIO.
        The three extra parameters are taken into account because this
        function returns a generated segment with fake AnalogSignal and
        fake SpikeTrain objects.

        Parameters:
            segment_duration : duration of the segment in seconds
            num_analogsignal : number of AnalogSignal objects in this segment
            num_spiketrain_by_channel : number of SpikeTrain objects per channel

        """

        sampling_rate = 10000.  #Hz
        t_start = -1.

        #time vector for generated signal
        timevect = np.arange(t_start, t_start + segment_duration,
                             1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy,
                                             cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrains
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(
                        lazy=lazy,
                        cascade=cascade,
                        segment_duration=segment_duration,
                        t_start=t_start,
                        channel_index=i)
                    seg.spiketrains += [sptr]

            # create an EventArray that mimics triggers.
            # note that ExampleIO does not give direct access to EventArray;
            # for that you need read_segment(cascade=True)
            eva = EventArray()
            if lazy:
                # in the lazy case no data are read
                # eva is empty
                pass
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; the time vector uses seconds as unit
                eva.times = timevect[(np.random.rand(n) *
                                      timevect.size).astype('i')] * pq.s
                # all durations are the same
                eva.durations = np.ones(n) * 500 * pq.ms
                # labels
                labels = []
                for i in range(n):
                    if np.random.rand() > .6:
                        labels.append('TriggerA')
                    else:
                        labels.append('TriggerB')
                eva.labels = np.array(labels)

            seg.eventarrays += [eva]

        create_many_to_one_relationship(seg)
        return seg
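
A usage sketch for the generator above, assuming ExampleIO follows the usual neo.io constructor convention (a filename keyword, which this fake IO ignores); the counts follow directly from the loops in read_segment.

from neo.io import ExampleIO

io = ExampleIO(filename='fake.nof')
seg = io.read_segment(segment_duration=5.,
                      num_analogsignal=2,
                      num_spiketrain_by_channel=2)
print(len(seg.analogsignals))  # -> 2
print(len(seg.spiketrains))    # -> 4 (2 channels x 2 trains per channel)
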
Example 3
    def read_segment(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        #Name
        f.seek(64, 0)
        # rstrip is safe on empty or all-space fields, unlike the original
        # character-by-character trailing-space loop
        surname = f.read(22).rstrip(' ')
        firstname = f.read(20).rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(
            name=firstname + ' ' + surname,
            file_origin=os.path.basename(self.filename),
        )
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176, 0)
        zone_names = [
            'ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E',
            'MONTAGE', 'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
            'EVENT B', 'TRIGGER'
        ]
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {
            -1: pq.nano * pq.V,
            0: pq.uV,
            1: pq.mV,
            2: 1,
            100: pq.percent,
            101: pq.dimensionless,
            102: pq.dimensionless
        }

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
                'iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max -
                               physical_min) / float(logical_max -
                                                     logical_min + 1)
                signal = (rawdata[:, c].astype('f') -
                          logical_ground) * factor * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  name=label,
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground=ground)

            seg.analogsignals.append(anaSig)

        sampling_rate = np.mean(
            [anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.fromstring(
                f.read(length),
                dtype=[('pos', 'u4'), ('label', label_dtype)],
            )
            ea = EventArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (triggers['pos'] !=
                                                           0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos'] / sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)

        # Read Event A and B
        # Not so well  tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.fromstring(f.read(length),
                                   dtype=[
                                       ('label', 'u4'),
                                       ('start', 'u4'),
                                       ('stop', 'u4'),
                                   ])
            ep = EpochArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (epochs['stop'] <
                                                           rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start'] / sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start']) /
                                sampling_rate).rescale('s')
            else:
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)

        seg.create_many_to_one_relationship()
        return seg
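
The core of the channel loop is the logical-to-physical scaling: raw integer samples are shifted by logical_ground and multiplied by the span ratio. A standalone sketch with made-up calibration values:

import numpy as np
import quantities as pq

# hypothetical calibration, as read from the LABCOD zone
logical_min, logical_max, logical_ground = -32768, 32767, 0
physical_min, physical_max = -3200, 3200  # e.g. microvolts

raw = np.array([0, 16384, -16384], dtype='i2')

factor = float(physical_max - physical_min) / \
         float(logical_max - logical_min + 1)
signal = (raw.astype('f') - logical_ground) * factor * pq.uV
print(signal)  # -> [0. 1600. -1600.] uV
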
Example 4
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: if True, import each neuroshare
                segment entity as a SpikeTrain with associated waveforms;
                otherwise these entities are skipped entirely.

        """

        seg = Segment(file_origin=os.path.basename(self.filename), )

        neuroshare = ctypes.windll.LoadLibrary(self.dllname)

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))
            #~ print 'type', entityInfo.dwEntityType,entity_types[entityInfo.dwEntityType], 'count', entityInfo.dwItemCount
            #~ print  entityInfo.szEntityLabel

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))
                #~ print pEventInfo.szCSVDesc, pEventInfo.dwEventType, pEventInfo.dwMinDataLength, pEventInfo.dwMaxDataLength

                if pEventInfo.dwEventType == 0:  #TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  #CSV
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = EventArray(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                #~ print 'dSampleRate' , pAnalogInfo.dSampleRate , pAnalogInfo.szUnits
                dwStartIndex = ctypes.c_uint32(0)
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ), dtype='f8')
                    neuroshare.ns_GetAnalogData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        ctypes.byref(pdwContCount),
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
                    neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer),
                                                  256)
                    #~ print 'pszMsgBuffer' , pszMsgBuffer.value
                    signal = pData[:pdwContCount.value] * pq.Quantity(
                        1, pAnalogInfo.szUnits)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            #segment
            if (entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT'
                    and import_neuroshare_segment):

                pdwSegmentInfo = ns_SEGMENTINFO()

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                #~ print 'pszMsgBuffer' , pszMsgBuffer.value

                #~ print 'pdwSegmentInfo.dwSourceCount' , pdwSegmentInfo.dwSourceCount
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    # times is not defined yet in the lazy branch, so build
                    # an empty train (t_stop is required by SpikeTrain)
                    sptr = SpikeTrain([] * pq.s, t_stop=0. * pq.s,
                                      name=str(entityInfo.szEntityLabel))
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='f8')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    # pdwSampleCount is only filled by ns_GetSegmentData in
                    # the loop below, so size the buffers from the segment info
                    nsample = int(pdwSegmentInfo.dwMaxSampleCount)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))
                        #print 'dwDataBufferSize' , dwDataBufferSize,pdwSampleCount , pdwUnitID

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :, :] = pData[:nsample * nsource] \
                            .reshape(nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times * pq.s,
                        waveforms=waveforms *
                        pq.Quantity(1., str(pdwSegmentInfo.szUnits)),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))
                #print pNeuralInfo.dwSourceUnitID , pNeuralInfo.szProbeInfo
                if lazy:
                    times = [] * pq.s
                    t_stop = 0. * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ), dtype='f8')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                # t_stop is required by SpikeTrain (cf. the next example)
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
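
Every Neuroshare call above uses the same ctypes out-parameter pattern: scalars come back through ctypes.byref, bulk data lands in a preallocated numpy buffer passed as a double*. A portable sketch of the pattern, with ctypes.memmove standing in for the DLL call:

import ctypes

import numpy as np

n = 4
src = np.arange(n, dtype='f8')
dst = np.zeros(n, dtype='f8')

# bulk transfer into a caller-owned buffer, as ns_GetAnalogData does
ctypes.memmove(dst.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
               src.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
               n * 8)

# scalar out-parameter, as pdwContCount is used above
count = ctypes.c_uint32(0)
count.value = n  # the DLL would fill this through ctypes.byref(count)
print(dst[:count.value])  # -> [0. 1. 2. 3.]
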
Example 5
    def read_segment(self, import_neuroshare_segment=True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: if True, import each neuroshare
                segment entity as a SpikeTrain with associated waveforms;
                otherwise these entities are skipped entirely.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)
        
        #elif sys.platform.startswith('darwin'):
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  #CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = EventArray(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer  = ctypes.create_string_buffer(" "*256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    sptr = SpikeTrain([] * pq.s, t_stop=0. * pq.s,
                                      name=str(entityInfo.szEntityLabel))
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times=pq.Quantity(times, units='s', copy=False),
                                      t_stop=times.max(),
                                      waveforms=pq.Quantity(waveforms, units=str(pdwSegmentInfo.szUnits), copy=False),
                                      left_sweep=nsample / 2. / float(pdwSegmentInfo.dSampleRate) * pq.s,
                                      sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                                      name=str(entityInfo.szEntityLabel))
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop=t_stop,
                                  name=str(entityInfo.szEntityLabel))
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
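
The important difference from the previous (Windows-only) variant is the chunked read: ns_GetAnalogData may deliver fewer samples than requested, so the loop keeps asking for the remainder starting at total_read. The retry pattern, generalized with a stand-in reader (the zero-progress guard is an addition, not in the original):

import numpy as np


def read_all(read_chunk, item_count):
    # read_chunk(buf, wanted) stands in for ns_GetAnalogData and may
    # return fewer samples than asked for
    data = np.zeros(item_count, dtype='f8')
    total = 0
    while total < item_count:
        got = read_chunk(data[total:], item_count - total)
        if got == 0:  # guard against a reader that makes no progress
            raise IOError('short read with no progress')
        total += got
    return data


def toy_reader(buf, wanted):
    # delivers at most 3 samples per call
    got = min(3, wanted)
    buf[:got] = 1.0
    return got


print(read_all(toy_reader, 8).sum())  # -> 8.0
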
Example 6
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)

            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = { }
            allspiketr = { }
            allevent = { }
            while True:
                h = hr.read_f()
                if h is None:
                    break

                channel, code ,  evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK' :
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER' :
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS

                    if code not in allevent:
                        allevent[code] = { }
                    if channel not in allevent[code]:
                        ea = EventArray(name = code , channel_index = channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0


                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
                    strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe)>= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                    #~ ev = Event()
                    #~ ev.time = h['timestamp'] - global_t_start
                    #~ ev.name = code
                     #~ # the strobe attribute is bit-packed into eventoffset
                    #~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    #~ ev.label = str(strobe)
                    #~ seg._events.append( ev )

                elif Types[evtype] == 'EVTYPE_SNIP' :

                    if code not in allspiketr:
                        allspiketr[code] = { }
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = { }
                    if h['sortcode'] not in allspiketr[code][channel]:
                        sptr = SpikeTrain([], units='s',
                                          name=str(h['sortcode']),
                                          #t_start = global_t_start,
                                          t_start=0. * pq.s,
                                          t_stop=0. * pq.s,  # temporary
                                          left_sweep=(h['size'] - 10.) / 2. / h['frequency'] * pq.s,
                                          sampling_rate=h['frequency'] * pq.Hz)
                        #~ sptr.channel = channel
                        #sptr.annotations['channel_index'] = channel
                        sptr.annotate(channel_index = channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size']-10

                        #~ sptr.name = str(h['sortcode'])
                        #~ sptr.t_start = global_t_start
                        #~ sptr.sampling_rate = h['frequency']
                        #~ sptr.left_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.right_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.waveformsize = h['size']-10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = { }
                    if channel not in allsig[code]:
                        #~ print 'code', code, 'channel',  channel
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] - global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0

                        # for counting:
                        anaSig.lazy_shape = 0
                        #~ anaSig.pos = 0
                        allsig[code][channel] = anaSig
                    allsig[code][channel].lazy_shape += \
                        (h['size'] * 4 - 40) // anaSig.dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(
                            np.zeros((anaSig.lazy_shape),
                                     dtype=anaSig.lazy_dtype) * pq.V)
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty( (ea.lazy_shape)  ) * pq.s
                        ea.labels = np.empty( (ea.lazy_shape), dtype = 'S'+str(ea.maxlabelsize) )
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros( (sptr.lazy_shape), dtype = 'f8' ) *pq.s ,
                                                            name = sptr.name,
                                                            t_start = sptr.t_start,
                                                            t_stop = sptr.t_stop,
                                                            left_sweep = sptr.left_sweep,
                                                            sampling_rate = sptr.sampling_rate,
                                                            waveforms = np.ones( (sptr.lazy_shape, 1, sptr.waveformsize) , dtype = 'f') * pq.mV ,
                                                        )
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search for sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                    tev = open(os.path.join(subdir, tankname+'_'+blockname+'.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(anaSig.channel_index)+'.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code ,  evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size']*4-40)/dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[ a.pos:a.pos+s ]  = np.fromstring( a.fid.read( s*dt.itemsize ), dtype = a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                        Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop =  (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring( sptr.fid.read( sptr.waveformsize*4 ), dtype = 'f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populating segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append( anaSig )

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append( ea )


            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append( sptr )

        create_many_to_one_relationship(bl)
        return bl
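
The strobe decoding above deserves a note: the tsq eventoffset field stores the bit pattern of a float64 inside an int64, so it is reinterpreted rather than numerically converted. Both lines below do the same reinterpretation; the sample value is made up.

import struct

import numpy as np

eventoffset = 4621819117588971520  # int64 holding the bits of a float64

strobe, = struct.unpack('d', struct.pack('q', eventoffset))
print(strobe)  # -> 10.0

# the same reinterpretation, numpy style
print(np.int64(eventoffset).view('f8'))  # -> 10.0
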
Example 7
    def readOneChannelEventOrSpike(self, fid, channel_num, header, lazy=True):
        # return SpikeTrain or EventArray
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock <0: return
        if channelHeader.kind not in [2, 3, 4 , 5 , 6 ,7, 8]: return

        ## Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick' , 'i4') ]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick' , 'i4') , ('marker' , 'i4') ]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick' , 'i4') , ('marker' , 'i4')  , ('adc' , 'S%d' %channelHeader.n_extra   )]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick' , 'i4') , ('marker' , 'i4')  , ('real' , 'S%d' %channelHeader.n_extra   )]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick' , 'i4') , ('marker' , 'i4')  ,  ('label' , 'S%d'%channelHeader.n_extra)]
        dt = np.dtype(fmt)


        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks) :
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0 :
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems
        
        if lazy :
            if channelHeader.kind in [2, 3, 4 , 5 , 8]:
                ea = EventArray(  )
                ea.annotate(channel_index = channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6 ,7]:
                sptr = SpikeTrain([ ]*pq.s, t_stop=1e99)  # correct value for t_stop to be put in later
                sptr.annotate(channel_index = channel_num)
                sptr.lazy_shape = totalitems
                return sptr

        else:
            alltrigs = np.zeros( totalitems , dtype = dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks) :
                blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring( fid.read( blockHeader.items*dt.itemsize)  , dtype = dt)

                alltrigs[pos:pos+trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0 :
                    fid.seek(blockHeader.succ_block)

            ## Step 3 convert in neo standard class : eventarrays or spiketrains
            alltimes = alltrigs['tick'].astype('f')*header.us_per_time * header.dtime_base*pq.s

            if channelHeader.kind in [2, 3, 4 , 5 , 8]:
                #events
                ea = EventArray(  )
                ea.annotate(channel_index = channel_num)
                ea.times = alltimes
                if channelHeader.kind >= 5:
                    # Spike2 marker is closer to the label sense of neo
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels = alltrigs['label'])
                return ea

            elif channelHeader.kind in [6 ,7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6 :
                    waveforms = np.fromstring(alltrigs['adc'].tostring() , dtype = 'i2')
                    waveforms = waveforms.astype('f4') *channelHeader.scale/ 6553.6 + channelHeader.offset
                elif channelHeader.kind == 7 :
                    waveforms = np.fromstring(alltrigs['real'].tostring() , dtype = 'f4')


                if header.system_id>=6 and channelHeader.interleave>1:
                    waveforms = waveforms.reshape((alltimes.size,-1,channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1,2)
                else:
                    waveforms = waveforms.reshape(( alltimes.size,1, -1))


                if header.system_id in [1,2,3,4,5]:
                    sample_interval = (channelHeader.divide*header.us_per_time*header.time_per_adc)*1e-6
                else :
                    sample_interval = (channelHeader.l_chan_dvd*header.us_per_time*header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit] )
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit )
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    t_stop = alltimes.max() # can get better value from associated AnalogSignal(s) ?
                else:
                    t_stop = 0.0
                sptr = SpikeTrain(alltimes,
                                  waveforms=waveforms * unit,
                                  sampling_rate=(1. / sample_interval) * pq.Hz,
                                  t_stop=t_stop)
                sptr.annotate(channel_index = channel_num)

                return sptr
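
In both Spike2 variants, spike and event times come from the same tick-to-seconds conversion: ticks scaled by us_per_time and dtime_base. A standalone sketch with hypothetical header values (dtime_base is typically 1e-6):

import numpy as np
import quantities as pq

us_per_time, dtime_base = 1, 1e-6  # hypothetical header fields
ticks = np.array([0, 2500, 5000], dtype='i4')

alltimes = ticks.astype('f') * us_per_time * dtime_base * pq.s
print(alltimes)  # -> [0. 0.0025 0.005] s
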
Example 8
    def read_segment(self, cascade = True, lazy = False,):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        #Name
        f.seek(64, 0)
        # rstrip is safe on empty or all-space fields, unlike the original
        # character-by-character trailing-space loop
        surname = f.read(22).rstrip(' ')
        firstname = f.read(20).rstrip(' ')

        #Date
        f.seek(128,0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year+1900 , month , day, hour, minute, sec)

        f.seek(138,0)
        Data_Start_Offset , Num_Chan , Multiplexer , Rate_Min , Bytes = f.read_f('IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175,0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=firstname + ' ' + surname,
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname = surname)
        seg.annotate(firstname = firstname)
        seg.annotate(rec_datetime = rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176,0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E', 'MONTAGE',
                'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A', 'EVENT B', 'TRIGGER']
        zones = { }
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset,0)
            rawdata = np.fromstring(f.read() , dtype = 'u'+str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos,0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {-1: pq.nano*pq.V, 0:pq.uV, 1:pq.mV, 2:1, 100: pq.percent,  101:pq.dimensionless, 102:pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos+code[c]*128+2,0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min , logical_max, logical_ground, physical_min, physical_max = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys() :
                unit = units[k]
            else :
                unit = pq.uV

            f.seek(8,1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [ ]*unit
            else:
                factor = float(physical_max - physical_min) / float(logical_max-logical_min+1)
                signal = ( rawdata[:,c].astype('f') - logical_ground )* factor*unit

            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                  name=label, channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground = ground)

            seg.analogsignals.append( anaSig )


        sampling_rate = np.mean([ anaSig.sampling_rate for anaSig in seg.analogsignals ])*pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [ ('TRIGGER', 'u2'), ('NOTE', 'S40') ]:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            triggers = np.fromstring(f.read(length) , dtype = [('pos','u4'), ('label', label_dtype)] ,  )
            ea = EventArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                keep = (triggers['pos']>=triggers['pos'][0]) & (triggers['pos']<rawdata.shape[0]) & (triggers['pos']!=0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos']/sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)
        
        # Read Event A and B
        # Not so well  tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            epochs = np.fromstring(f.read(length) , 
                            dtype = [('label','u4'),('start','u4'),('stop','u4'),]  )
            ep = EpochArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                keep = (epochs['start']>0) & (epochs['start']<rawdata.shape[0]) & (epochs['stop']<rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start']/sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start'])/sampling_rate).rescale('s')
            else:
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)
        
        
        seg.create_many_to_one_relationship()
        return seg
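
A minimal sketch of the logical-to-physical scaling used above, with made-up calibration values (logical_min/max, physical_min/max, logical_ground and the samples are illustrative, not from a real file):

    import numpy as np
    import quantities as pq

    # illustrative per-channel calibration, normally read from the LABCOD zone
    logical_min, logical_max = 0, 65535
    physical_min, physical_max = -3200, 3200
    logical_ground = 32768
    unit = pq.uV

    raw = np.array([32768, 33000, 32500], dtype='f')  # fake raw samples
    factor = float(physical_max - physical_min) / float(logical_max - logical_min + 1)
    signal = (raw - logical_ground) * factor * unit
    # the logical ground maps to 0 uV; samples above/below it become positive/negative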
Example No. 9
0
    def read_block(self,
                   lazy = False,
                   cascade = True,
                   ):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)

            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = { }
            allspiketr = { }
            allevent = { }
            while True:
                h = hr.read_f()
                if h is None:
                    break

                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK' :
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER' :
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS

                    if code not in allevent:
                        allevent[code] = { }
                    if channel not in allevent[code]:
                        ea = EventArray(name = code , channel_index = channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0


                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
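                    # reinterpret the 8-byte integer 'eventoffset' as a float64 strobe value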
                    strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe)>= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                    #~ ev = Event()
                    #~ ev.time = h['timestamp'] - global_t_start
                    #~ ev.name = code
                    #~ # the strobe attribute is packed into eventoffset
                    #~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    #~ ev.label = str(strobe)
                    #~ seg._events.append( ev )

                elif Types[evtype] == 'EVTYPE_SNIP' :

                    if code not in allspiketr:
                        allspiketr[code] = { }
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = { }
                    if h['sortcode'] not in allspiketr[code][channel]:
                        sptr = SpikeTrain([ ], units = 's',
                                          name = str(h['sortcode']),
                                          #t_start = global_t_start,
                                          t_start = 0.*pq.s,
                                          t_stop = 0.*pq.s, # temporary
                                          left_sweep = (h['size']-10.)/2./h['frequency'] * pq.s,
                                          sampling_rate = h['frequency'] * pq.Hz,
                                          )
                        #~ sptr.channel = channel
                        #sptr.annotations['channel_index'] = channel
                        sptr.annotate(channel_index = channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size']-10
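                        # 'size' is in 4-byte words and includes the 40-byte record header,
                        # so each snippet carries size-10 waveform samples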

                        #~ sptr.name = str(h['sortcode'])
                        #~ sptr.t_start = global_t_start
                        #~ sptr.sampling_rate = h['frequency']
                        #~ sptr.left_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.right_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.waveformsize = h['size']-10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = { }
                    if channel not in allsig[code]:
                        #~ print 'code', code, 'channel',  channel
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=
                                              h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0

                        # for counting:
                        anaSig.lazy_shape = 0
                        #~ anaSig.pos = 0
                        allsig[code][channel] = anaSig
                    # count items using the on-disk sample size of this channel (not the loop-carried anaSig)
                    allsig[code][channel].lazy_shape += (h['size']*4-40) // allsig[code][channel].lazy_dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(np.zeros((anaSig.lazy_shape) , dtype = anaSig.lazy_dtype)*pq.V )
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty( (ea.lazy_shape)  ) * pq.s
                        ea.labels = np.empty( (ea.lazy_shape), dtype = 'S'+str(ea.maxlabelsize) )
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros( (sptr.lazy_shape), dtype = 'f8' ) * pq.s,
                                             name = sptr.name,
                                             t_start = sptr.t_start,
                                             t_stop = sptr.t_stop,
                                             left_sweep = sptr.left_sweep,
                                             sampling_rate = sptr.sampling_rate,
                                             waveforms = np.ones( (sptr.lazy_shape, 1, sptr.waveformsize), dtype = 'f') * pq.mV,
                                             )
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                    tev = open(os.path.join(subdir, tankname+'_'+blockname+'.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(anaSig.channel_index)+'.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size']*4-40)/dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[ a.pos:a.pos+s ]  = np.fromstring( a.fid.read( s*dt.itemsize ), dtype = a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                        Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop =  (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring( sptr.fid.read( sptr.waveformsize*4 ), dtype = 'f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populating segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append( anaSig )

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append( ea )


            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append( sptr )

        bl.create_many_to_one_relationship()
        return bl
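
The strobe decoding above works by reinterpreting the 8-byte integer stored in 'eventoffset' as a float64. A minimal, self-contained sketch of that bit-level round trip (257.0 is an arbitrary value):

    import struct

    eventoffset = struct.unpack('q', struct.pack('d', 257.0))[0]  # as stored on disk
    strobe, = struct.unpack('d', struct.pack('q', eventoffset))   # as decoded in the reader
    assert strobe == 257.0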
Example No. 10
0
    def read_segment(self,
                     # the first 2 keyword arguments are imposed by the neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     segment_duration = 15.,
                     num_analogsignal = 4,
                     num_spiketrain_by_channel = 3,
                    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just a example to be adapted to each ClassIO.
        In this case these 3 paramters are  taken in account because this function
        return a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """

        sampling_rate = 10000. #Hz
        t_start = -1.


        # time vector for the generated signal
        timevect = np.arange(t_start, t_start+ segment_duration , 1./sampling_rate)

        # create an empty segment
        seg = Segment( name = 'it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                            channel_index = i ,segment_duration = segment_duration, t_start = t_start)
                seg.analogsignals += [ ana ]

            # read nested spiketrains
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(lazy = lazy , cascade = cascade ,
                                                            segment_duration = segment_duration, t_start = t_start , channel_index = i)
                    seg.spiketrains += [ sptr ]


            # create an EventArray that mimics triggers.
            # note that ExampleIO does not allow direct access to EventArrays;
            # for that you need read_segment(cascade = True)
            eva = EventArray()
            if lazy:
                # in the lazy case no data is read
                # eva stays empty
                pass
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva.times = timevect[(np.random.rand(n)*timevect.size).astype('i')]* pq.s
                # all durations are the same
                eva.durations = np.ones(n)*500*pq.ms
                # random labels
                l = [ ]
                for i in range(n):
                    if np.random.rand() > .6:
                        l.append('TriggerA')
                    else:
                        l.append('TriggerB')
                eva.labels = np.array( l )

            seg.eventarrays += [ eva ]

        seg.create_many_to_one_relationship()
        return seg
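
Assuming neo's ExampleIO (the class this method appears to belong to), a typical call might look like this; the filename is arbitrary since this fake IO ignores it:

    from neo.io import ExampleIO

    io = ExampleIO(filename='fake.nof')
    seg = io.read_segment(segment_duration=5., num_analogsignal=2,
                          num_spiketrain_by_channel=2)
    print(len(seg.analogsignals), len(seg.spiketrains))  # expect 2 and 4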