def _handle_processing_group(self, block):
    # todo: handle other modules than Units
    units_group = self._file.get('processing/Units/UnitTimes')
    segment_map = dict((segment.name, segment) for segment in block.segments)
    for name, group in units_group.items():
        if name == 'unit_list':
            pass  # todo
        else:
            segment_name = group['source'].value
            # desc = group['unit_description'].value  # use this to store the Neo Unit id?
            segment = segment_map[segment_name]
            if self._lazy:
                times = np.array(())
                lazy_shape = group['times'].shape
            else:
                times = group['times'].value
            # todo: t_stop is a custom Neo value; general NWB files will not
            # have it - use segment.t_stop instead in that case?
            spiketrain = SpikeTrain(times, units=pq.second,
                                    t_stop=group['t_stop'].value * pq.second)
            if self._lazy:
                spiketrain.lazy_shape = lazy_shape
            spiketrain.segment = segment
            segment.spiketrains.append(spiketrain)
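These examples all share the same lazy-loading idiom: build an empty SpikeTrain and record the shape the data would have had in a lazy_shape attribute. A minimal self-contained sketch of that idiom (plain numpy/quantities/neo, with a made-up array standing in for the HDF5 dataset):

import numpy as np
import quantities as pq
from neo.core import SpikeTrain

def load_spiketrain(times_source, t_stop, lazy=False):
    # Return a SpikeTrain; if lazy, keep it empty but remember the shape.
    if lazy:
        st = SpikeTrain(np.array(()), units=pq.second, t_stop=t_stop * pq.second)
        st.lazy_shape = times_source.shape  # the shape the data would have had
    else:
        st = SpikeTrain(times_source, units=pq.second, t_stop=t_stop * pq.second)
    return st

st = load_spiketrain(np.array([0.1, 0.5, 0.9]), t_stop=1.0, lazy=True)
print(st.size, st.lazy_shape)  # 0 (3,)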
Example #2
    def read_spiketrain(self,
                        # the first 2 keyword arguments are imposed by the neo.io API
                        lazy=False,
                        cascade=True,
                        segment_duration=15.,
                        t_start=-1,
                        channel_index=0,
                        ):
        """
        With this IO a SpikeTrain can be accessed directly by its channel number.
        """
        # There are two possible behaviours for a SpikeTrain:
        # holding many Spike instances, or directly holding spike times.
        # We choose the latter here:
        if not HAVE_SCIPY:
            raise SCIPY_ERR

        num_spike_by_spiketrain = 40
        sr = 10000.

        if lazy:
            times = []
        else:
            times = (np.random.rand(num_spike_by_spiketrain) * segment_duration +
                     t_start)

        # create a spiketrain
        spiketr = SpikeTrain(times,
                             t_start=t_start * pq.s,
                             t_stop=(t_start + segment_duration) * pq.s,
                             units=pq.s,
                             name='it is a spiketrain from exampleio',
                             )

        if lazy:
            # we add the attribute lazy_shape with the size it would have if loaded
            spiketr.lazy_shape = (num_spike_by_spiketrain,)

        # our spiketrains also hold the waveforms:

        # 1. generate a fake spike shape (2D array if trodness > 1)
        w1 = -stats.nct.pdf(np.arange(11, 60, 4), 5, 20)[::-1] / 3.
        w2 = stats.nct.pdf(np.arange(11, 60, 2), 5, 20)
        w = np.r_[w1, w2]
        w = -w / max(w)

        if not lazy:
            # in the neo API the waveforms attribute is 3D (to allow for tetrodes);
            # here it is a mono electrode, so dim 1 has size 1
            waveforms = np.tile(w[np.newaxis, np.newaxis, :],
                                (num_spike_by_spiketrain, 1, 1))
            waveforms *= np.random.randn(*waveforms.shape) / 6 + 1
            spiketr.waveforms = waveforms * pq.mV
            spiketr.sampling_rate = sr * pq.Hz
            spiketr.left_sweep = 1.5 * pq.s

        # for attributes outside the neo schema you can annotate
        spiketr.annotate(channel_index=channel_index)

        return spiketr
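A quick way to exercise this method (a sketch, assuming the legacy neo API in which ExampleIO fabricates its data and never actually opens the file):

from neo.io import ExampleIO

io = ExampleIO(filename='fake_data.fake')  # the file does not need to exist
st = io.read_spiketrain(lazy=False, segment_duration=15., t_start=-1,
                        channel_index=3)
print(st.t_start, st.t_stop)            # -1.0 s  14.0 s
print(st.waveforms.shape)               # (40, 1, 38)
print(st.annotations['channel_index'])  # 3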
Example #4
def _read_spiketrain(self, node, parent):
    attributes = self._get_standard_attributes(node)
    t_start = self._get_quantity(node["t_start"])
    t_stop = self._get_quantity(node["t_stop"])
    # todo: handle sampling_rate, waveforms, left_sweep
    spiketrain = SpikeTrain(self._get_quantity(node["times"]),
                            t_start=t_start, t_stop=t_stop,
                            **attributes)
    spiketrain.segment = parent
    if self._lazy:
        spiketrain.lazy_shape = node["times"].shape
    self.object_refs[node.attrs["object_ref"]] = spiketrain
    return spiketrain
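The _get_quantity helper is not shown here; a plausible reconstruction for an h5py-backed node (hypothetical, the real IO's helper may differ) reads the dataset together with a stored units attribute:

import quantities as pq

def _get_quantity(self, node):
    # hypothetical layout: the dataset holds the magnitudes and a 'units'
    # attribute holds the unit string, e.g. 'ms' or 'mV'
    values = node[()]  # read the whole HDF5 dataset
    units = node.attrs.get('units', 'dimensionless')
    if isinstance(units, bytes):
        units = units.decode('utf-8')
    return pq.Quantity(values, units)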
Example #6
def _extract_spikes(self, data, metadata, channel_index, lazy):
    spiketrain = None
    if lazy:
        if channel_index in data[:, 1]:
            spiketrain = SpikeTrain([], units=pq.ms, t_stop=0.0)
            spiketrain.lazy_shape = None
    else:
        spike_times = self._extract_array(data, channel_index)
        if len(spike_times) > 0:
            spiketrain = SpikeTrain(spike_times, units=pq.ms,
                                    t_stop=spike_times.max())
    if spiketrain is not None:
        spiketrain.annotate(label=metadata["label"],
                            channel_index=channel_index,
                            dt=metadata["dt"])
        return spiketrain
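From the data[:, 1] test above, the raw array evidently stores spike times in column 0 and channel ids in column 1; an _extract_array consistent with that layout (hypothetical, the real method may differ) is a one-line mask:

import numpy as np

def _extract_array(self, data, channel_index):
    # column 0: spike times (in ms); column 1: channel/source id
    return data[data[:, 1] == channel_index, 0]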
Example #8
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        t_start=0. * pq.s,
        unit=pq.s,
    ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i, line in enumerate(f):
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '': alldata = alldata[:-1]
            if alldata[0] == '': alldata = alldata[1:]
            if lazy:
                spike_times = []
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit

            sptr = SpikeTrain(spike_times * unit,
                              t_start=t_start,
                              t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index=i)
            seg.spiketrains.append(sptr)
        f.close()

        seg.create_many_to_one_relationship()
        return seg
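End to end, with one row of tab-separated spike times per channel (a sketch, assuming the legacy AsciiSpikeTrainIO class this method comes from):

import quantities as pq
from neo.io import AsciiSpikeTrainIO

with open('/tmp/spikes.txt', 'w') as f:
    f.write('0.5\t1.2\t3.4\n')  # channel 0
    f.write('0.1\t2.2\n')       # channel 1

io = AsciiSpikeTrainIO(filename='/tmp/spikes.txt')
seg = io.read_segment(lazy=False, delimiter='\t', t_start=0. * pq.s, unit=pq.s)
for st in seg.spiketrains:
    print(st.annotations['channel_index'], st.times)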
Example #9
    def __save_segment(self):
        '''
        Write the segment to the Block if it exists
        '''
        # if this is the beginning of the first condition, then we don't want
        # to save, so exit
        # but set __seg from None to False so we know next time to create a
        # segment even if there are no spikes in the condition
        if self.__seg is None:
            self.__seg = False
            return

        if not self.__seg:
            # create dummy values if there are no SpikeTrains in this condition
            self.__seg = Segment(file_origin=self._filename, **self.__params)
            self.__spiketimes = []

        if self.__lazy:
            train = SpikeTrain(pq.Quantity([], dtype=np.float32, units=pq.ms),
                               t_start=0 * pq.ms,
                               t_stop=self.__t_stop * pq.ms,
                               file_origin=self._filename)
            train.lazy_shape = len(self.__spiketimes)
        else:
            times = pq.Quantity(self.__spiketimes,
                                dtype=np.float32,
                                units=pq.ms)
            train = SpikeTrain(times,
                               t_start=0 * pq.ms,
                               t_stop=self.__t_stop * pq.ms,
                               file_origin=self._filename)

        self.__seg.spiketrains = [train]
        self.__unit.spiketrains.append(train)
        self._blk.segments.append(self.__seg)

        # set an empty segment
        # from now on, we need to set __seg to False rather than None so
        # that if there is a condition with no SpikeTrains we know
        # to create an empty Segment
        self.__seg = False
Example #13
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)

        #elif sys.platform.startswith('darwin'):

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  # TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            # segment
            if (entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT'
                    and import_neuroshare_segment):

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    sptr = SpikeTrain([] * pq.s,  # no times are read in lazy mode
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[
                            dwIndex, :, :] = pData[:nsample * nsource].reshape(
                                nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(
                                                  pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
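Driving this reader requires the vendor's Neuroshare library; a sketch of the call (assuming neo's legacy ctypes-based NeuroshareIO, where the data file and the DLL/shared-library path are both given at construction):

from neo.io import NeuroshareIO

# paths are illustrative; the library must match the acquisition system
io = NeuroshareIO(filename='data.mcd', dllname='nsMCDLibrary.so')
seg = io.read_segment(import_neuroshare_segment=True, lazy=False)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.eventarrays))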
Example #14
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders), dtype="i")  # int: used as slice indices below
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocating memory and a second loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
            pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype="f")
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader["Channel"]
                n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
                time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
                time /= globalHeader["ADFrequency"]

                if n2 < 0:
                    break
                if dataBlockHeader["Type"] == 1:
                    # spike
                    unit = dataBlockHeader["Unit"]
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = (
                            np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                        )
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader["Type"] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan] += 1

                elif dataBlockHeader["Type"] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size

        ## Step 4: create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
            else:
                times = evarrays[chan]
            ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] == 102:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
                elif globalHeader["Version"] >= 103:
                    gain = globalHeader["SlowMaxMagnitudeMV"] / (
                        0.5
                        * (2 ** globalHeader["BitsPerSpikeSample"])
                        * slowChannelHeaders[chan]["Gain"]
                        * slowChannelHeaders[chan]["PreampGain"]
                    )
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]["Channel"],
                channel_name=slowChannelHeaders[chan]["Name"],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader["Version"] < 103:
                        gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                    elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                        )
                    elif globalHeader["Version"] > 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                        )
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
            sptr.annotate(channel_index=chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
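Typical use of this reader (a sketch, assuming the legacy PlexonIO interface; current neo versions read .plx files through a rawio layer instead):

from neo.io import PlexonIO

io = PlexonIO(filename='recording.plx')
seg = io.read_segment(lazy=False, load_spike_waveform=True)
print(seg.rec_datetime)
for st in seg.spiketrains:
    print(st.annotations['channel_index'], st.size)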
Example #15
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade: return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname, blockname)
            if not os.path.isdir(subdir): continue

            seg = Segment(name=blockname)
            bl.segments.append(seg)

            #TSQ is the global index
            tsq_filename = os.path.join(subdir,
                                        tankname + '_' + blockname + '.tsq')
            dt = [
                ('size', 'int32'),
                ('evtype', 'int32'),
                ('code', 'S4'),
                ('channel', 'uint16'),
                ('sortcode', 'uint16'),
                ('timestamp', 'float64'),
                ('eventoffset', 'int64'),
                ('dataformat', 'int32'),
                ('frequency', 'float32'),
            ]
            tsq = np.fromfile(tsq_filename, dtype=dt)

            # 0x8801: 'EVTYPE_MARK' gives the global t_start
            global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

            #TEV is the old data file
            if os.path.exists(
                    os.path.join(subdir, tankname + '_' + blockname + '.tev')):
                tev_filename = os.path.join(
                    subdir, tankname + '_' + blockname + '.tev')
                #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                tev_array = np.fromfile(tev_filename, dtype='uint8')

            else:
                tev_filename = None

            for type_code, type_label in tdt_event_type:
                mask1 = tsq['evtype'] == type_code
                codes = np.unique(tsq[mask1]['code'])

                for code in codes:
                    mask2 = mask1 & (tsq['code'] == code)
                    channels = np.unique(tsq[mask2]['channel'])

                    for channel in channels:
                        mask3 = mask2 & (tsq['channel'] == channel)

                        if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                            if lazy:
                                times = [] * pq.s
                                labels = np.array([], dtype=str)
                            else:
                                times = (tsq[mask3]['timestamp'] -
                                         global_t_start) * pq.s
                                labels = tsq[mask3]['eventoffset'].view(
                                    'float64').astype('S')
                            ea = EventArray(times=times,
                                            name=code,
                                            channel_index=int(channel),
                                            labels=labels)
                            if lazy:
                                ea.lazy_shape = np.sum(mask3)
                            seg.eventarrays.append(ea)

                        elif type_label == 'EVTYPE_SNIP':
                            sortcodes = np.unique(tsq[mask3]['sortcode'])
                            for sortcode in sortcodes:
                                mask4 = mask3 & (tsq['sortcode'] == sortcode)
                                nb_spike = np.sum(mask4)
                                sr = tsq[mask4]['frequency'][0]
                                waveformsize = tsq[mask4]['size'][0] - 10
                                if lazy:
                                    times = [] * pq.s
                                    waveforms = None
                                else:
                                    times = (tsq[mask4]['timestamp'] -
                                             global_t_start) * pq.s
                                    dt = np.dtype(data_formats[
                                        tsq[mask3]['dataformat'][0]])
                                    waveforms = get_chunks(
                                        tsq[mask4]['size'],
                                        tsq[mask4]['eventoffset'],
                                        tev_array).view(dt)
                                    waveforms = waveforms.reshape(
                                        nb_spike, -1, waveformsize)
                                    waveforms = waveforms * pq.mV
                                if nb_spike > 0:
                                    # t_start = (tsq['timestamp'][0] - global_t_start) * pq.s
                                    # this should work, but does not
                                    t_start = 0 * pq.s
                                    t_stop = (tsq['timestamp'][-1] -
                                              global_t_start) * pq.s

                                else:
                                    t_start = 0 * pq.s
                                    t_stop = 0 * pq.s
                                st = SpikeTrain(
                                    times=times,
                                    name='Chan{} Code{}'.format(
                                        channel, sortcode),
                                    t_start=t_start,
                                    t_stop=t_stop,
                                    waveforms=waveforms,
                                    left_sweep=waveformsize / 2. / sr * pq.s,
                                    sampling_rate=sr * pq.Hz,
                                )
                                st.annotate(channel_index=channel)
                                if lazy:
                                    st.lazy_shape = nb_spike
                                seg.spiketrains.append(st)

                        elif type_label == 'EVTYPE_STREAM':
                            dt = np.dtype(
                                data_formats[tsq[mask3]['dataformat'][0]])
                            shape = np.sum(tsq[mask3]['size'] - 10)
                            sr = tsq[mask3]['frequency'][0]
                            if lazy:
                                signal = []
                            else:
                                if PY3K:
                                    signame = code.decode('ascii')
                                else:
                                    signame = code
                                sev_filename = os.path.join(
                                    subdir, tankname + '_' + blockname + '_' +
                                    signame + '_ch' + str(channel) + '.sev')
                                if os.path.exists(sev_filename):
                                    #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                    sig_array = np.fromfile(sev_filename,
                                                            dtype='uint8')
                                else:
                                    sig_array = tev_array
                                signal = get_chunks(tsq[mask3]['size'],
                                                    tsq[mask3]['eventoffset'],
                                                    sig_array).view(dt)

                            anasig = AnalogSignal(
                                signal=signal * pq.V,
                                name='{} {}'.format(code, channel),
                                sampling_rate=sr * pq.Hz,
                                t_start=(tsq[mask3]['timestamp'][0] -
                                         global_t_start) * pq.s,
                                channel_index=int(channel))
                            if lazy:
                                anasig.lazy_shape = shape
                            seg.analogsignals.append(anasig)
        bl.create_many_to_one_relationship()
        return bl
Example #16
    def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                        lazy=True):
        # return a SpikeTrain or an Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        ## Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit = 0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring(
                    fid.read(blockHeader.items * dt.itemsize), dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to neo standard classes: events or spiketrains
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # the Spike2 marker is closer to neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time *
                                       header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                      waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
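The per-unit split at the end masks spike times by the low byte of the Spike2 marker code; the same idiom in isolation (a self-contained sketch with made-up markers):

import numpy as np

alltimes = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
markers = np.array([0, 1, 0, 2, 1])

for ced_unit in sorted(set(markers & 255)):  # low byte encodes the sorted unit
    print(ced_unit, alltimes[markers == ced_unit])
# 0 [0.1 0.3]
# 1 [0.2 0.5]
# 2 [0.4]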
Example #17
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : load or not waveform of spikes (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i')  # int: used as slice indices below
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        # spiketimes and waveforms
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2.**32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = { }
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])
                
            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            swfarrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan,unit] = np.zeros(nb_spikes[chan,unit], dtype = 'f')
                if load_spike_waveform:
                    n1,n2 = wf_sizes[chan, unit,:]
                    swfarrays[chan, unit] = np.zeros( (nb_spikes[chan, unit], n1, n2 ) , dtype = 'f4' )
            pos_spikes = np.zeros(nb_spikes.shape, dtype = 'i')
                    
            # allocating mem for event
            eventpositions = { }
            evarrays = { }
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan]=0 
                
            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader['NumberOfWordsInWaveform']
                time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
                        dataBlockHeader['TimeStamp'])
                time /= globalHeader['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1*n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.frombuffer(
                            fid.read(n1*n2*2), dtype='i2').reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1*n2*2, 1)
                    pos_spikes[chan, unit] += 1
                
                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan]+= 1

                elif dataBlockHeader['Type'] == 5:
                    # continuous signal
                    data = np.frombuffer(fid.read(n2*2), dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:sample_positions[chan]+data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4: create neo object
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = [ ]
            else:
                if globalHeader['Version'] == 100 or globalHeader['Version'] == 101:
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*1000.)
                elif globalHeader['Version'] == 102:
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV']/(
                        .5*(2**globalHeader['BitsPerSpikeSample']) *
                        slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan]*gain
            anasig =  AnalogSignal(signal*pq.V,
                sampling_rate = float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
                t_start = t_starts[chan]*pq.s,
                channel_index = slowChannelHeaders[chan]['Channel'],
                channel_name = slowChannelHeaders[chan]['Name'],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0: continue
            if lazy:
                times = [ ]
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan,unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] < 103:
                        gain = 3000./(2048*dspChannelHeaders[chan]['Gain']*1000.)
                    elif globalHeader['Version'] >= 103 and globalHeader['Version'] < 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*1000.)
                    elif globalHeader['Version'] >= 105:
                        # version 105 previously fell through with gain undefined
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*globalHeader['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s', 
                t_stop=t_stop*pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan,unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
Exemplo n.º 18
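# A note on the reader above: it makes two passes over the .plx data blocks --
# pass 1 counts samples/spikes/events per channel, arrays are then preallocated,
# and pass 2 fills them. Below is a minimal, self-contained sketch of that
# two-pass pattern on a made-up record format (int16 channel + int16 sample
# count + int16 payload); it is an illustration, not the real Plexon layout.
import io
import struct
import numpy as np

def two_pass_read(fid, n_channels):
    # pass 1: count samples per channel, skipping the payloads
    counts = np.zeros(n_channels, dtype='i8')
    start = fid.tell()
    while True:
        header = fid.read(4)
        if len(header) < 4:
            break
        chan, n = struct.unpack('<hh', header)
        counts[chan] += n
        fid.seek(n * 2, 1)
    # allocate once, then pass 2: copy each payload into its channel array
    arrays = dict((c, np.empty(counts[c], dtype='f4')) for c in range(n_channels))
    pos = np.zeros(n_channels, dtype='i8')
    fid.seek(start)
    while True:
        header = fid.read(4)
        if len(header) < 4:
            break
        chan, n = struct.unpack('<hh', header)
        data = np.frombuffer(fid.read(n * 2), dtype='i2').astype('f4')
        arrays[chan][pos[chan]:pos[chan] + n] = data
        pos[chan] += n
    return arrays

# usage on an in-memory fake file with two records for channel 0
buf = io.BytesIO()
for chan, samples in [(0, [1, 2, 3]), (0, [4, 5])]:
    buf.write(struct.pack('<hh', chan, len(samples)))
    buf.write(np.array(samples, dtype='i2').tobytes())
buf.seek(0)
print(two_pass_read(buf, n_channels=1)[0])   # -> [1. 2. 3. 4. 5.]
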
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)

            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = { }
            allspiketr = { }
            allevent = { }
            while True:
                h = hr.read_f()
                if h is None:
                    break

                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK' :
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER' :
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS

                    if code not in allevent:
                        allevent[code] = { }
                    if channel not in allevent[code]:
                        ea = EventArray(name = code , channel_index = channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0


                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
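                    # the strobe label lives in the 64-bit 'eventoffset' field;
                    # pack/unpack reinterprets those 8 bytes as a float64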
                    strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe)>= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                    #~ ev = Event()
                    #~ ev.time = h['timestamp'] - global_t_start
                    #~ ev.name = code
                     #~ # it the strobe attribute masked with eventoffset
                    #~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    #~ ev.label = str(strobe)
                    #~ seg._events.append( ev )

                elif Types[evtype] == 'EVTYPE_SNIP' :

                    if code not in allspiketr:
                        allspiketr[code] = { }
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = { }
                    if h['sortcode'] not in allspiketr[code][channel]:
                        sptr = SpikeTrain([], units='s',
                                          name=str(h['sortcode']),
                                          #t_start = global_t_start,
                                          t_start=0.*pq.s,
                                          t_stop=0.*pq.s,  # temporary
                                          left_sweep=(h['size']-10.)/2./h['frequency'] * pq.s,
                                          sampling_rate=h['frequency'] * pq.Hz,
                                          )
                        #~ sptr.channel = channel
                        #sptr.annotations['channel_index'] = channel
                        sptr.annotate(channel_index = channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size']-10

                        #~ sptr.name = str(h['sortcode'])
                        #~ sptr.t_start = global_t_start
                        #~ sptr.sampling_rate = h['frequency']
                        #~ sptr.left_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.right_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.waveformsize = h['size']-10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = { }
                    if channel not in allsig[code]:
                        #~ print 'code', code, 'channel',  channel
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=
                                              h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0

                        # for counting:
                        anaSig.lazy_shape = 0
                        #~ anaSig.pos = 0
                        allsig[code][channel] = anaSig
                    # count in items of the stored signal's dtype (integer division)
                    anaSig = allsig[code][channel]
                    anaSig.lazy_shape += (h['size']*4 - 40) // anaSig.dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(np.zeros((anaSig.lazy_shape) , dtype = anaSig.lazy_dtype)*pq.V )
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty( (ea.lazy_shape)  ) * pq.s
                        ea.labels = np.empty( (ea.lazy_shape), dtype = 'S'+str(ea.maxlabelsize) )
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros( (sptr.lazy_shape), dtype = 'f8' ) *pq.s ,
                                                            name = sptr.name,
                                                            t_start = sptr.t_start,
                                                            t_stop = sptr.t_stop,
                                                            left_sweep = sptr.left_sweep,
                                                            sampling_rate = sptr.sampling_rate,
                                                            waveforms = np.ones( (sptr.lazy_shape, 1, sptr.waveformsize) , dtype = 'f') * pq.mV ,
                                                        )
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search for sev (individual data files) or tev (common data file)
                # sev is used for version > 70
                if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                    tev = open(os.path.join(subdir, tankname+'_'+blockname+'.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(anaSig.channel_index)+'.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size']*4-40)/dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[a.pos:a.pos+s] = np.frombuffer(a.fid.read(s*dt.itemsize), dtype=a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                        Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop =  (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.frombuffer(sptr.fid.read(sptr.waveformsize*4), dtype='f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populating segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append( anaSig )

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append( ea )


            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append( sptr )

        bl.create_many_to_one_relationship()
        return bl
Exemplo n.º 19
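# The TDT reader above labels strobe events by reinterpreting the 64-bit integer
# 'eventoffset' field as a float64 -- same 8 bytes, different type. A minimal
# sketch of that bit-reinterpretation trick (the integer value is made up):
import struct
import numpy as np

raw = struct.pack('q', 4611686018427387904)   # pack the int64 into 8 bytes
strobe, = struct.unpack('d', raw)             # reinterpret those bytes as float64
print(strobe)                                 # -> 2.0

# the same reinterpretation with numpy, without the bytes round-trip:
print(np.array([4611686018427387904], dtype='int64').view('float64'))  # -> [2.]
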
    def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms = False):
        # basic header
        dt = [('header_id', 'S8'),
              ('ver_major', 'uint8'),
              ('ver_minor', 'uint8'),
              ('additional_flag', 'uint16'),  # read flags, currently basically unused
              ('header_size', 'uint32'),  # i.e. index of first data
              ('packet_size', 'uint32'),  # number of bytes per packet, i.e. bytes per sample
              ('sampling_rate', 'uint32'),  # time resolution of time stamps in Hz
              ('waveform_sampling_rate', 'uint32'),  # sampling frequency of waveforms in Hz
              ('window_datetime', 'S16'),
              ('application', 'S32'),
              ('comments', 'S256'),
              ('num_ext_header', 'uint32'),  # number of extended headers
              ]
        nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
        version = '{0}.{1}'.format(h['ver_major'], h['ver_minor'])
        assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', \
            'Unsupported version {0}'.format(version)
        seg.annotate(blackrock_version = version)
        seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
        sr = float(h['sampling_rate'])
        wsr = float(h['waveform_sampling_rate'])
        
        if not cascade:
            return
        
        # extended header
        # this consists of N blocks with an 8-byte code + 24 data bytes;
        # the data bytes depend on the code and need to be converted case by case
        raw_ext_header = np.memmap(filename_nev, offset=np.dtype(dt).itemsize,
                                   dtype=[('code', 'S8'), ('data', 'S24')], shape=h['num_ext_header'])
        # decode each extended-header code into its own structured view
        ext_header = { }
        for code, dt_ext in ext_nev_header_codes.items():
            sel = raw_ext_header['code']==code
            ext_header[code] = raw_ext_header[sel].view(dt_ext)
        
        
        # channel label
        neuelbl_header = ext_header['NEUEVLBL']
        # with a single channel the header fields are scalars and zip() fails,
        # so fall back to a one-entry dict
        try:
            channel_labels = dict(zip(neuelbl_header['channel_id'], neuelbl_header['channel_label']))
        except TypeError:
            channel_labels = dict([(neuelbl_header['channel_id'], neuelbl_header['channel_label'])])

        # TODO ext_header['DIGLABEL'] is there only one label ???? because no id in that case
        # TODO ECOMMENT + CCOMMENT for annotations
        # TODO NEUEVFLT for annotations
        
        
        # read data packet and markers
        dt0 =  [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('value', 'S{0}'.format(h['packet_size']-6)),
            ]
        data = np.memmap( filename_nev, offset = h['header_size'], dtype = dt0)
        all_ids = np.unique(data['id'])
        
        t_start = 0*pq.s
        t_stop = data['samplepos'][-1]/sr*pq.s
        
        
        
        # read events (digital + analog + comments)
        def create_event_array_trig_or_analog(selection, name, labelmode=None):
            if lazy:
                times = [ ]
                labels = np.array([ ], dtype = 'S')
            else:
                times = data_trigger['samplepos'][selection].astype(float)/sr
                if labelmode == 'digital_port':
                    labels = data_trigger['digital_port'][selection].astype('S2')
                elif labelmode is None:
                    labels = None
            ev = EventArray(times= times*pq.s,
                            labels= labels,
                            name=name)
            if lazy:
                ev.lazy_shape = np.sum(selection)
            seg.eventarrays.append(ev)

        mask = (data['id']==0) 
        dt_trig =  [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('reason', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('digital_port', 'uint16'), 
                    ('reserved1', 'S{0}'.format(h['packet_size']-10)),
                ]
        data_trigger = data.view(dt_trig)[mask]
        # Digital Triggers (PacketID 0)
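        # 'reason' is a bitfield: bit 0 flags a digital trigger,
        # bits 1-5 flag the analog triggers tested below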
        is_digital = (data_trigger ['reason']&1)>0
        create_event_array_trig_or_analog(is_digital, 'Digital trigger', labelmode =  'digital_port' )
        
        # Analog Triggers (PacketID 0)
        if version in ['2.1', '2.2' ]:
            for i in range(5):
                is_analog = (data_trigger ['reason']&(2**(i+1)))>0
                create_event_array_trig_or_analog(is_analog, 'Analog trigger {0}'.format(i), labelmode = None)
        
        # Comments
        mask = (data['id'] == 0xFFFF)  # comment packets use packet id 0xFFFF
        dt_comments = [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('charset', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('color', 'uint32'), 
                    ('comment', 'S{0}'.format(h['packet_size']-12)),
                ]
        data_comments = data.view(dt_comments)[mask]
        if data_comments.size>0:
            if lazy:
                times = [ ]
                labels = [ ]
            else:
                times = data_comments['samplepos'].astype(float)/sr
                labels = data_comments['comment'].astype('S')
            ev = EventArray(times= times*pq.s,
                            labels= labels,
                            name='Comments')
            if lazy:
                ev.lazy_shape = data_comments.size
            seg.eventarrays.append(ev)
        
        
        # READ Spike channel
        channel_ids = all_ids[(all_ids>0) & (all_ids<=2048)]

        # get the dtype of the waveforms (this is stupidly complicated)
        if nev_header['additional_flag'] & 0x1:
            #dtype_waveforms = { k:'int16' for k in channel_ids }
            dtype_waveforms = dict((k, 'int16') for k in channel_ids)
        else:
            # one extended-header entry per electrode gives the appropriate dtype
            neuewav_header = ext_header['NEUEVWAV']
            dtype_waveform = dict(zip(neuewav_header['channel_id'], neuewav_header['num_bytes_per_waveform']))
            dtypes_conv = { 0: 'int8', 1 : 'int8', 2: 'int16', 4 : 'int32' }
            #dtype_waveforms = { k:dtypes_conv[v] for k,v in dtype_waveform.items() }
            dtype_waveforms = dict( (k,dtypes_conv[v]) for k,v in dtype_waveform.items() )
        
        dt2 =   [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('cluster', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('waveform','uint8',(h['packet_size']-8, )),
                ]
        data_spike = data.view(dt2)
        
        for channel_id in channel_ids:
            data_spike_chan = data_spike[data['id']==channel_id]
            cluster_ids = np.unique(data_spike_chan['cluster'])
            for cluster_id in cluster_ids:
                if cluster_id==0: 
                    name =  'unclassified'
                elif cluster_id==255:
                    name =  'noise'
                else:
                    name = 'Cluster {0}'.format(cluster_id)
                name = 'Channel {0} '.format(channel_id)+name
                
                data_spike_chan_clus = data_spike_chan[data_spike_chan['cluster']==cluster_id]
                n_spike = data_spike_chan_clus.size
                waveforms, w_sampling_rate, left_sweep = None, None, None
                if lazy:
                    times = [ ]
                else:
                    times = data_spike_chan_clus['samplepos'].astype(float)/sr
                    if load_waveforms:
                        dtype_waveform = dtype_waveforms[channel_id]
                        waveform_size = (h['packet_size']-8) // np.dtype(dtype_waveform).itemsize
                        waveforms = data_spike_chan_clus['waveform'].flatten().view(dtype_waveform)
                        waveforms = waveforms.reshape(n_spike, 1, waveform_size)
                        waveforms = waveforms * pq.uV
                        w_sampling_rate = wsr * pq.Hz
                        left_sweep = waveform_size//2/sr*pq.s
                st = SpikeTrain(times =  times*pq.s, name = name,
                                t_start = t_start, t_stop =t_stop,
                                waveforms = waveforms, sampling_rate = w_sampling_rate, left_sweep = left_sweep)
                st.annotate(channel_index = int(channel_id))
                if lazy:
                    st.lazy_shape = n_spike
                seg.spiketrains.append(st)
Exemplo n.º 20
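# The NEV reader above maps the whole packet stream once with a generic dtype,
# then re-views the same bytes with packet-specific dtypes (trigger, comment,
# spike) of identical itemsize. A small self-contained sketch of that
# re-viewing technique on an in-memory buffer; the 8-byte packet layout here
# is made up, not the real NEV format:
import numpy as np

packet_size = 8
dt_generic = [('samplepos', 'uint32'), ('id', 'uint16'),
              ('value', 'S{0}'.format(packet_size - 6))]
dt_trigger = [('samplepos', 'uint32'), ('id', 'uint16'),
              ('reason', 'uint8'), ('port', 'uint8')]

# build two fake packets: a trigger packet (id 0) and some other packet
raw = np.zeros(2, dtype=dt_trigger)
raw[0] = (100, 0, 1, 5)
raw[1] = (200, 42, 0, 0)
data = np.frombuffer(raw.tobytes(), dtype=dt_generic)

mask = data['id'] == 0                     # select trigger packets by id...
triggers = data.view(dt_trigger)[mask]     # ...and reinterpret their bytes
print(triggers['samplepos'], triggers['reason'])   # -> [100] [1]
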
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        # ~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f8') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and wavefoms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/
                    #~ globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop, name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                # fragment start indexes are stored just after the timestamps
                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'] +
                                            entity_header['n'] * 4)
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=entity_header['offset'])
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    markertype = fid.read(64).replace(b'\x00', b'').decode()
                    labels = np.memmap(
                        self.filename, np.dtype(
                            'S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
Exemplo n.º 21
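# The NeuroExplorer reader above pulls each variable out of the file with
# np.memmap and an offset taken from the entity header, then rescales int32
# clock ticks to seconds. A minimal sketch of offset-based memmap reading on
# a temporary file; the layout (16 junk bytes, then n int32 ticks) and the
# 40 kHz clock are made up for the example:
import os
import tempfile
import numpy as np

freq = 40000.0
ticks = np.array([400, 800, 1200], dtype='i4')

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as f:
    f.write(b'\x00' * 16)              # stand-in for a file header
    f.write(ticks.tobytes())

mm = np.memmap(path, np.dtype('i4'), 'r', shape=(len(ticks),), offset=16)
spike_times = mm.astype('f8') / freq   # ticks -> seconds
del mm                                 # release the mapping before deleting
print(spike_times)                     # -> [0.01 0.02 0.03]
os.remove(path)
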
    def read_segment(self, block_index=0, seg_index=0, lazy=False,
                     signal_group_mode=None, load_waveforms=False, time_slice=None):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depend IO):
        This control behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units' all channel sharing the same quantity units ar grouped in
            a 2D AnalogSignal

        :param load_waveforms: False by default. Control SpikeTrains.waveforms is None or not.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All object AnalogSignal, SpikeTrain, Event, Epoch will load only in the slice.
        """

        if lazy:
            warnings.warn(
                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
                DeprecationWarning)

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # get only a slice of objects limited by t_start and t_stop time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time_slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(
                    block_index, seg_index, channel_indexes) * pq.s

                sig_size = self.get_signal_size(block_index=block_index, seg_index=seg_index,
                                                channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice get: get i_start, i_stop, new sig_t_start
                    if t_stop is not None:
                        i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(block_index=block_index,
                                                             seg_index=seg_index, i_start=i_start,
                                                             i_stop=i_stop,
                                                             channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                        channel_indexes,
                        signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case annotations by channel is OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
                            'signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][chan_index]
                    else:
                        # when channel are grouped by same unit
                        # annotations have channel_names and channel_ids array
                        # this will be moved in array annotations soon
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                        annotations['channel_names'] = signal_channels[ind_abs]['name']
                        annotations['channel_ids'] = signal_channels[ind_abs]['id']
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]), units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within], units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
                                                             seg_index=seg_index,
                                                             unit_index=unit_index,
                                                             t_start=t_start_, t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(raw_waveforms, dtype='float32',
                                                                  unit_index=unit_index)
                wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms, units=wf_units,
                                        dtype='float32', copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
                unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(block_index=block_index,
                                                            seg_index=seg_index,
                                                            unit_index=unit_index,
                                                            t_start=t_start_, t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times, units='s', copy=False,
                                  t_start=seg_t_start, t_stop=seg_t_stop,
                                  waveforms=waveforms, left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate, **annotations)
            else:
                nb = self.spike_count(block_index=block_index, seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]), units='s', copy=False, t_start=seg_t_start,
                                  t_stop=seg_t_stop, **annotations)
                sptr.lazy_shape = (nb,)

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index, event_channel_index=chan_ind,
                    t_start=t_start_, t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index, seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb,)
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][
                chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times, labels=ev_labels, units='s', copy=False, **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times, durations=ev_durations, labels=ev_labels,
                          units='s', copy=False, **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
Exemplo n.º 22
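# read_segment above converts a (t_start, t_stop) time slice into sample
# indexes before asking the rawio layer for a signal chunk: it clips at the
# signal borders and shifts sig_t_start by the samples it skipped. The same
# index arithmetic in isolation, with plain floats instead of quantities:
def slice_to_indexes(t_start, t_stop, sig_t_start, sig_size, sr):
    """Return (i_start, i_stop, new_sig_t_start) for a time slice."""
    i_stop = None
    if t_stop is not None:
        i_stop = min(int((t_stop - sig_t_start) * sr), sig_size)
    i_start = None
    if t_start is not None:
        i_start = max(int((t_start - sig_t_start) * sr), 0)
        sig_t_start += i_start / sr    # t_start moves to the first kept sample
    return i_start, i_stop, sig_t_start

# a signal starting at 2.0 s, 10 kHz, 100000 samples, sliced to [2.5 s, 3.0 s]
print(slice_to_indexes(2.5, 3.0, 2.0, 100000, 10000.))   # -> (5000, 10000, 2.5)
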
    def read_block(self, lazy=False, cascade=True):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0 or not cascade:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                    index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                if lazy:
                    st = SpikeTrain(
                        times=[],
                        units='sec', t_start=0.0,
                        t_stop=spks.max() / self.sampling_rate,
                        name=('unit %d from group %d' % (unit_id, group)))
                    st.lazy_shape = len(spks[uids==unit_id])
                else:
                    st = SpikeTrain(
                        times=spks[uids==unit_id] / self.sampling_rate,
                        units='sec', t_start=0.0,
                        t_stop=spks.max() / self.sampling_rate,
                        name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if not lazy and len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.spiketrains.append(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
Exemplo n.º 23
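# The KlustaKwik reader above splits one array of spike times into per-unit
# SpikeTrains using np.unique plus a boolean mask per cluster id. The grouping
# step on its own, with made-up spike samples and a made-up sampling rate:
import numpy as np

sampling_rate = 20000.0
spks = np.array([100, 250, 400, 900, 1300])   # spike times in samples
uids = np.array([1, 2, 1, 2, 1])              # cluster id of each spike

for unit_id in np.unique(uids):
    times = spks[uids == unit_id] / sampling_rate   # samples -> seconds
    print('unit', unit_id, times)
# unit 1 -> [0.005 0.02 0.065], unit 2 -> [0.0125 0.045]
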
0
    def read_block(self,
                                        lazy = False,
                                        cascade = True,
                                ):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)

            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = { }
            allspiketr = { }
            allevent = { }
            while 1:
                h= hr.read_f()
                if h==None:break

                channel, code ,  evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK' :
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER' :
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS

                    if code not in allevent:
                        allevent[code] = { }
                    if channel not in allevent[code]:
                        ea = EventArray(name = code , channel_index = channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0


                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
                    strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe)>= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                    #~ ev = Event()
                    #~ ev.time = h['timestamp'] - global_t_start
                    #~ ev.name = code
                     #~ # it the strobe attribute masked with eventoffset
                    #~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    #~ ev.label = str(strobe)
                    #~ seg._events.append( ev )

                elif Types[evtype] == 'EVTYPE_SNIP' :

                    if code not in allspiketr:
                        allspiketr[code] = { }
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = { }
                    if h['sortcode'] not in allspiketr[code][channel]:





                        sptr = SpikeTrain([ ], units = 's',
                                                        name = str(h['sortcode']),
                                                        #t_start = global_t_start,
                                                        t_start = 0.*pq.s,
                                                        t_stop = 0.*pq.s, # temporary
                                                        left_sweep = (h['size']-10.)/2./h['frequency'] * pq.s,
                                                        sampling_rate = h['frequency'] * pq.Hz,

                                                        )
                        #~ sptr.channel = channel
                        #sptr.annotations['channel_index'] = channel
                        sptr.annotate(channel_index = channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size']-10

                        #~ sptr.name = str(h['sortcode'])
                        #~ sptr.t_start = global_t_start
                        #~ sptr.sampling_rate = h['frequency']
                        #~ sptr.left_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.right_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.waveformsize = h['size']-10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = { }
                    if channel not in allsig[code]:
                        #~ print 'code', code, 'channel',  channel
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=
                                              h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0

                        # for counting:
                        anaSig.lazy_shape = 0
                        #~ anaSig.pos = 0
                        allsig[code][channel] = anaSig
                    allsig[code][channel].lazy_shape += (h['size']*4-40)/anaSig.dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(np.zeros((anaSig.lazy_shape) , dtype = anaSig.lazy_dtype)*pq.V )
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty( (ea.lazy_shape)  ) * pq.s
                        ea.labels = np.empty( (ea.lazy_shape), dtype = 'S'+str(ea.maxlabelsize) )
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros( (sptr.lazy_shape), dtype = 'f8' ) *pq.s ,
                                                            name = sptr.name,
                                                            t_start = sptr.t_start,
                                                            t_stop = sptr.t_stop,
                                                            left_sweep = sptr.left_sweep,
                                                            sampling_rate = sptr.sampling_rate,
                                                            waveforms = np.ones( (sptr.lazy_shape, 1, sptr.waveformsize) , dtype = 'f') * pq.mV ,
                                                        )
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : searh sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                    tev = open(os.path.join(subdir, tankname+'_'+blockname+'.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(anaSig.channel_index)+'.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while True:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size']*4-40)/dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[a.pos:a.pos + s] = np.fromstring(
                            a.fid.read(s * dt.itemsize), dtype=a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                        Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
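                        # the strobe value is packed into 'eventoffset': reinterpret
                        # the int64 bit pattern as a float64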
                        strobe, = struct.unpack('d', struct.pack('q', h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop = (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring(
                            sptr.fid.read(sptr.waveformsize * 4), dtype='f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populating segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append( anaSig )

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append( ea )


            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append( sptr )

        create_many_to_one_relationship(bl)
        return bl
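For orientation, a minimal usage sketch of the tank reader above. It is hedged: the TdtIO class and the dirname keyword match this snippet's API, but the tank path is a hypothetical placeholder.

from neo.io import TdtIO  # assumed import path for this reader

# hypothetical tank directory; each TDT block inside becomes one Neo Segment
io = TdtIO(dirname='/data/MyTank')
bl = io.read_block(lazy=False, cascade=True)
for seg in bl.segments:
    print(seg.name, len(seg.analogsignals), len(seg.spiketrains))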
Example No. 24
    def read_segment(self,
                     block_index=0,
                     seg_index=0,
                     lazy=False,
                     signal_group_mode=None,
                     load_waveforms=False,
                     time_slice=None):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default. 

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depend IO):
        This control behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units' all channel sharing the same quantity units ar grouped in
            a 2D AnalogSignal

        :param load_waveforms: False by default. Control SpikeTrains.waveforms is None or not.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All object AnalogSignal, SpikeTrain, Event, Epoch will load only in the slice.
        """

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(
            self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # get only a slice of objects, limited by time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(
                t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(block_index, seg_index,
                                                      channel_indexes) * pq.s

                sig_size = self.get_signal_size(
                    block_index=block_index,
                    seg_index=seg_index,
                    channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice: get i_start, i_stop and the new sig_t_start
                    if t_stop is not None:
                        i_stop = int(
                            (t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int(
                            (t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(
                        block_index=block_index,
                        seg_index=seg_index,
                        i_start=i_start,
                        i_stop=i_stop,
                        channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within,
                        ind_abs) in self._make_signal_channel_subgroups(
                            channel_indexes,
                            signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case per-channel annotations are OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index][
                            'segments'][seg_index]['signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][
                                chan_index]
                    else:
                        # when channels are grouped by the same units,
                        # the annotations are empty...
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]),
                                              units=units,
                                              copy=False,
                                              sampling_rate=sr,
                                              t_start=sig_t_start,
                                              **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within],
                                              units=units,
                                              copy=False,
                                              sampling_rate=sr,
                                              t_start=sig_t_start,
                                              **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(
                    block_index=block_index,
                    seg_index=seg_index,
                    unit_index=unit_index,
                    t_start=t_start_,
                    t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(
                    raw_waveforms, dtype='float32', unit_index=unit_index)
                wf_units = ensure_signal_units(
                    unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms,
                                        units=wf_units,
                                        dtype='float32',
                                        copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][
                    unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(
                        wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][
                seg_index]['units'][unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(
                    block_index=block_index,
                    seg_index=seg_index,
                    unit_index=unit_index,
                    t_start=t_start_,
                    t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(
                    spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times,
                                  units='s',
                                  copy=False,
                                  t_start=seg_t_start,
                                  t_stop=seg_t_stop,
                                  waveforms=waveforms,
                                  left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate,
                                  **annotations)
            else:
                nb = self.spike_count(block_index=block_index,
                                      seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]),
                                  units='s',
                                  copy=False,
                                  t_start=seg_t_start,
                                  t_stop=seg_t_stop,
                                  **annotations)
                sptr.lazy_shape = (nb, )

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index,
                    event_channel_index=chan_ind,
                    t_start=t_start_,
                    t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(
                    ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(
                        ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index,
                                      seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb, )
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][
                seg_index]['events'][chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times,
                          labels=ev_labels,
                          units='s',
                          copy=False,
                          **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times,
                          durations=ev_durations,
                          labels=ev_labels,
                          units='s',
                          copy=False,
                          **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
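A usage sketch for this rawio-backed read_segment. ExampleIO and the filename are stand-in assumptions; the keyword arguments mirror the signature documented above.

import quantities as pq
from neo.io import ExampleIO  # any rawio-based IO class would do

io = ExampleIO(filename='example.fake')  # hypothetical file
seg = io.read_segment(block_index=0, seg_index=0,
                      signal_group_mode='group-by-same-units',
                      load_waveforms=True,
                      time_slice=(0.0 * pq.s, 5.0 * pq.s))
for anasig in seg.analogsignals:
    print(anasig.name, anasig.sampling_rate, anasig.shape)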
Example No. 25
    def read_segment(
        self,
        lazy=False,
        cascade=True,
    ):

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=globalHeader['version'])
        seg.annotate(comment=globalHeader['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(
                fid, EntityHeader).read_f(offset=offset + i * 208)
            entityHeader['name'] = entityHeader['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                    name=entityHeader['name'],
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    event_times = event_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = EventArray(times=event_times,
                                  labels=labels,
                                  channel_name=entityHeader['name'])
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    start_times = start_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                    stop_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    stop_times = stop_times.astype(
                        'f8') / globalHeader['freq'] * pq.s
                epar = EpochArray(times=start_times,
                                  durations=stop_times - start_times,
                                  labels=np.array([''] * start_times.size,
                                                  dtype='S'),
                                  channel_name=entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    spike_times = spike_times.astype(
                        'f8') / globalHeader['freq'] * pq.s

                    waveforms = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['n'], 1,
                               entityHeader['NPointsWave']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4,
                    )
                    waveforms = (waveforms.astype('f') * entityHeader['ADtoMV']
                                 + entityHeader['MVOffset']) * pq.mV
                t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop,
                    name=entityHeader['name'],
                    waveforms=waveforms,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms,
                )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # popvectors
                pass

            if entityHeader['type'] == 5:
                # analog

                timestamps = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                timestamps = timestamps.astype('f8') / globalHeader['freq']
                fragmentStarts = np.memmap(
                    self.filename,
                    np.dtype('i4'),
                    'r',
                    shape=(entityHeader['n']),
                    offset=entityHeader['offset'],
                )
                fragmentStarts = fragmentStarts.astype(
                    'f8') / globalHeader['freq']
                t_start = timestamps[0] - fragmentStarts[0] / float(
                    entityHeader['WFrequency'])
                del timestamps, fragmentStarts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(
                        self.filename,
                        np.dtype('i2'),
                        'r',
                        shape=(entityHeader['NPointsWave']),
                        offset=entityHeader['offset'],
                    )
                    signal = signal.astype('f')
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal * pq.mV

                anaSig = AnalogSignal(
                    signal=signal,
                    t_start=t_start * pq.s,
                    sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                    name=entityHeader['name'],
                    channel_index=entityHeader['WireNumber'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                seg.analogsignals.append(anaSig)

            if entityHeader['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(
                        self.filename,
                        np.dtype('i4'),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'],
                    )
                    times = times.astype('f8') / globalHeader['freq'] * pq.s
                    fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entityHeader['MarkerLength'])),
                        'r',
                        shape=(entityHeader['n']),
                        offset=entityHeader['offset'] + entityHeader['n'] * 4 +
                        64)
                ea = EventArray(times=times,
                                labels=labels.view(np.ndarray),
                                name=entityHeader['name'],
                                channel_index=entityHeader['WireNumber'],
                                marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        create_many_to_one_relationship(seg)
        return seg
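These readers share a lazy convention: with lazy=True the data arrays stay empty and a lazy_shape attribute records the would-be size, so a file's structure can be inspected cheaply before loading. A sketch, assuming NeuroExplorerIO as the reader and a hypothetical .nex file:

from neo.io import NeuroExplorerIO

io = NeuroExplorerIO(filename='recording.nex')  # hypothetical file
seg = io.read_segment(lazy=True)
for sptr in seg.spiketrains:
    # no spike times are loaded; lazy_shape gives the count a full load would return
    print(sptr.name, getattr(sptr, 'lazy_shape', None))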
Example No. 26
    def read_segment(self,
                     blockname=None,
                     lazy=False,
                     cascade=True,
                     sortname=''):
        """
        Read a single segment from the tank. Note that TDT blocks are Neo
        segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
        refers to the TDT block's name, which will be the Neo segment name.

        'sortname' is used to specify the external sortcode generated by offline spike sorting.
        If sortname=='PLX', there should be a ./sort/PLX/*.SortResult file in the TDT block,
        which stores the sortcode for every spike; it defaults to '', which uses the original online sort.
        """
        if not blockname:
            blockname = os.listdir(self.dirname)[0]

        if blockname == 'TempBlk': return None

        if not self.is_tdtblock(blockname): return None  # if not a tdt block

        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir): return None

        seg = Segment(name=blockname)

        tankname = os.path.basename(self.dirname)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir,
                                    tankname + '_' + blockname + '.tsq')
        dt = [
            ('size', 'int32'),
            ('evtype', 'int32'),
            ('code', 'S4'),
            ('channel', 'uint16'),
            ('sortcode', 'uint16'),
            ('timestamp', 'float64'),
            ('eventoffset', 'int64'),
            ('dataformat', 'int32'),
            ('frequency', 'float32'),
        ]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        #0x8801: 'EVTYPE_MARK' gives the global t_start
        global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

        #TEV is the old data file
        try:
            tev_filename = os.path.join(subdir,
                                        tankname + '_' + blockname + '.tev')
            #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        except IOError:
            tev_filename = None

        #if there exists an external sortcode in ./sort/[sortname]/*.SortResult (generated after offline sorting)
        sortresult_filename = None
        if sortname != '':
            try:
                for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                    if file.endswith(".SortResult"):
                        sortresult_filename = os.path.join(
                            subdir, 'sort', sortname, file)

                        # get new sortcode
                        newsorcode = np.fromfile(sortresult_filename, 'int8')[
                            1024:]  # the first 1024 bytes are the file header
                        # update the sort code with the info from this file
                        tsq['sortcode'][1:-1] = newsorcode
                        # print('sortcode updated')
                        break
            except OSError:
                sortresult_filename = None
            except IOError:
                sortresult_filename = None

        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype'] == type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code'] == code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel'] == channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [] * pq.s
                            labels = np.array([], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] -
                                     global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view(
                                'float64').astype('S')
                        ea = Event(times=times,
                                   name=code,
                                   channel_index=int(channel),
                                   labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.events.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode'] == sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0] - 10
                            if lazy:
                                times = [] * pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] -
                                         global_t_start) * pq.s
                                dt = np.dtype(
                                    data_formats[tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(
                                    tsq[mask4]['size'],
                                    tsq[mask4]['eventoffset'],
                                    tev_array).view(dt)
                                waveforms = waveforms.reshape(
                                    nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                                #   t_start = (tsq['timestamp'][0] - global_t_start) * pq.s # this should work but does not
                                t_start = 0 * pq.s
                                t_stop = (tsq['timestamp'][-1] -
                                          global_t_start) * pq.s

                            else:
                                t_start = 0 * pq.s
                                t_stop = 0 * pq.s
                            st = SpikeTrain(
                                times=times,
                                name='Chan{0} Code{1}'.format(
                                    channel, sortcode),
                                t_start=t_start,
                                t_stop=t_stop,
                                waveforms=waveforms,
                                left_sweep=waveformsize / 2. / sr * pq.s,
                                sampling_rate=sr * pq.Hz,
                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(
                            data_formats[tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size'] - 10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = []
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(
                                subdir, tankname + '_' + blockname + '_' +
                                signame + '_ch' + str(channel) + '.sev')
                            try:
                                #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename,
                                                        dtype='uint8')
                            except IOError:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],
                                                tsq[mask3]['eventoffset'],
                                                sig_array).view(dt)

                        anasig = AnalogSignal(
                            signal=signal * pq.V,
                            name='{0} {1}'.format(code, channel),
                            sampling_rate=sr * pq.Hz,
                            t_start=(tsq[mask3]['timestamp'][0] -
                                     global_t_start) * pq.s,
                            channel_index=int(channel))
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)
        return seg
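A usage sketch for reading a single TDT block with an external sort code. The tank path and block name are hypothetical; sortname='PLX' assumes a ./sort/PLX/*.SortResult file exists, as the docstring describes.

from neo.io import TdtIO  # assumed import path for this reader

io = TdtIO(dirname='/data/MyTank')
seg = io.read_segment(blockname='Block-1', sortname='PLX')
for st in seg.spiketrains:
    print(st.name, st.annotations.get('channel_index'), len(st))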
Example No. 27
    def read_block(self, lazy=False, cascade=True):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0 or not cascade:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                         index=unit_id,
                         group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                if lazy:
                    st = SpikeTrain(times=[],
                                    units='sec',
                                    t_start=0.0,
                                    t_stop=spks.max() / self.sampling_rate,
                                    name=('unit %d from group %d' %
                                          (unit_id, group)))
                    st.lazy_shape = len(spks[uids == unit_id])
                else:
                    st = SpikeTrain(
                        times=spks[uids == unit_id] / self.sampling_rate,
                        units='sec',
                        t_start=0.0,
                        t_stop=spks.max() / self.sampling_rate,
                        name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if not lazy and len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.spiketrains.append(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
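A usage sketch for the KlustaKwik reader; the KlustaKwikIO class name and the session basename are assumptions. The 'group' and 'cluster' annotations are the ones set in read_block above.

from neo.io import KlustaKwikIO  # assumed import path

io = KlustaKwikIO(filename='/data/session/base')  # basename of the .fet/.clu files
block = io.read_block(lazy=False)
for st in block.segments[0].spiketrains:
    print(st.annotations['group'], st.annotations['cluster'], len(st))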
Example No. 28
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)
            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            #TSQ is the global index
            tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
            dt = [('size','int32'),
                        ('evtype','int32'),
                        ('code','S4'),
                        ('channel','uint16'),
                        ('sortcode','uint16'),
                        ('timestamp','float64'),
                        ('eventoffset','int64'),
                        ('dataformat','int32'),
                        ('frequency','float32'),
                    ]
            tsq = np.fromfile(tsq_filename, dtype = dt)
            
            #0x8801: 'EVTYPE_MARK' gives the global t_start
            global_t_start = tsq[tsq['evtype']==0x8801]['timestamp'][0]
           
            #TEV is the old data file
            if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
                #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                tev_array = np.fromfile(tev_filename, dtype = 'uint8')
                
            else:
                tev_filename = None


            for type_code, type_label in tdt_event_type:
                mask1 = tsq['evtype']==type_code
                codes = np.unique(tsq[mask1]['code'])
                
                for code in codes:
                    mask2 = mask1 & (tsq['code']==code)
                    channels = np.unique(tsq[mask2]['channel'])
                    
                    for channel in channels:
                        mask3 = mask2 & (tsq['channel']==channel)
                        
                        if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                            if lazy:
                                times = [ ]*pq.s
                                labels = np.array([ ], dtype = str)
                            else:
                                times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                                labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                            ea = EventArray(times = times, name = code , channel_index = int(channel), labels = labels)
                            if lazy:
                                ea.lazy_shape = np.sum(mask3)
                            seg.eventarrays.append(ea)
                        
                        elif type_label == 'EVTYPE_SNIP':
                            sortcodes = np.unique(tsq[mask3]['sortcode'])
                            for sortcode in sortcodes:
                                mask4 = mask3 & (tsq['sortcode']==sortcode)
                                nb_spike = np.sum(mask4)
                                sr = tsq[mask4]['frequency'][0]
                                waveformsize = tsq[mask4]['size'][0]-10
                                if lazy:
                                    times = [ ]*pq.s
                                    waveforms = None
                                else:
                                    times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                    dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])                                    
                                    waveforms = get_chunks(tsq[mask4]['size'],tsq[mask4]['eventoffset'], tev_array).view(dt)
                                    waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                    waveforms = waveforms * pq.mV
                                if nb_spike>0:
                                    #   t_start = (tsq['timestamp'][0] - global_t_start) * pq.s # this should work but does not
                                    t_start = 0 *pq.s
                                    t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
                                    
                                else:
                                    t_start = 0 *pq.s
                                    t_stop = 0 *pq.s
                                st = SpikeTrain(times=times,
                                                name='Chan{} Code{}'.format(channel, sortcode),
                                                t_start=t_start,
                                                t_stop=t_stop,
                                                waveforms=waveforms,
                                                left_sweep=waveformsize / 2. / sr * pq.s,
                                                sampling_rate=sr * pq.Hz,
                                                )
                                st.annotate(channel_index = channel)
                                if lazy:
                                    st.lazy_shape = nb_spike
                                seg.spiketrains.append(st)
                        
                        elif type_label == 'EVTYPE_STREAM':
                            dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                            shape = np.sum(tsq[mask3]['size']-10)
                            sr = tsq[mask3]['frequency'][0]
                            if lazy:
                                signal = [ ]
                            else:
                                if PY3K:
                                    signame = code.decode('ascii')
                                else:
                                    signame = code
                                sev_filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(channel)+'.sev')
                                if os.path.exists(sev_filename):
                                    #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                    sig_array = np.fromfile(sev_filename, dtype = 'uint8')
                                else:
                                    sig_array = tev_array
                                signal = get_chunks(tsq[mask3]['size'],tsq[mask3]['eventoffset'],  sig_array).view(dt)
                            
                            anasig = AnalogSignal(signal=signal * pq.V,
                                                  name='{} {}'.format(code, channel),
                                                  sampling_rate=sr * pq.Hz,
                                                  t_start=(tsq[mask3]['timestamp'][0] -
                                                           global_t_start) * pq.s,
                                                  channel_index=int(channel))
                            if lazy:
                                anasig.lazy_shape = shape
                            seg.analogsignals.append(anasig)
        bl.create_many_to_one_relationship()
        return bl
Example No. 29
    def readOneChannelEventOrSpike(self, fid, channel_num, header, lazy=True):
        # return SpikeTrain or EventArray
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0: return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]: return

        ## Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = EventArray()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return [ea]

            elif channelHeader.kind in [6, 7]:
                sptr = SpikeTrain(
                    [] * pq.s,
                    t_stop=1e99)  # correct value for t_stop to be put in later
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                sptr.lazy_shape = totalitems
                return [sptr]

        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid,
                                           np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring(fid.read(blockHeader.items *
                                               dt.itemsize),
                                      dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to Neo standard classes: EventArray or SpikeTrain
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = EventArray()
                ea.annotate(channel_index=channel_num)
                ea.times = alltimes
                if channelHeader.kind >= 5:
                    # a Spike2 marker is closer to Neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return [ea]

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4'
                    ) * channelHeader.scale / 6553.6 + channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time * header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # could get a better value from an associated AnalogSignal?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) *
                                      pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
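                # the low byte of the Spike2 marker encodes the sorted unit id:
                # build one SpikeTrain per unit found on this channel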
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(
                        alltimes[alltrigs['marker'] == i],
                        waveforms=waveforms[alltrigs['marker'] == i] * unit,
                        sampling_rate=(1. / sample_interval) * pq.Hz,
                        t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
Example No. 30
    def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms=False):
        # basic header
        dt = [
            ('header_id', 'S8'),
            ('ver_major', 'uint8'),
            ('ver_minor', 'uint8'),
            ('additionnal_flag',
             'uint16'),  # Read flags, currently basically unused
            ('header_size', 'uint32'),  #i.e. index of first data
            ('packet_size',
             'uint32'),  # Read number of packet bytes, i.e. byte per sample
            ('sampling_rate', 'uint32'
             ),  # Read time resolution in Hz of time stamps, i.e. data packets
            ('waveform_sampling_rate',
             'uint32'),  # Read sampling frequency of waveforms in Hz
            ('window_datetime', 'S16'),
            ('application', 'S32'),
            ('comments', 'S256'),  # comments
            ('num_ext_header', 'uint32')  #Read number of extended headers
        ]
        nev_header = h = np.fromfile(filename_nev, count=1, dtype=dt)[0]
        version = '{}.{}'.format(h['ver_major'], h['ver_minor'])
        assert h['header_id'].decode(
            'ascii'
        ) == 'NEURALEV' or version == '2.1', 'Unsupported version {}'.format(
            version)
        seg.annotate(blackrock_version=version)
        seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
        sr = float(h['sampling_rate'])
        wsr = float(h['waveform_sampling_rate'])

        if not cascade:
            return

        # extended header
        # this consists of N blocks with an 8-byte code + 24 data bytes
        # the data bytes depend on the code and need to be converted case by case
        raw_ext_header = np.memmap(filename_nev,
                                   offset=np.dtype(dt).itemsize,
                                   dtype=[('code', 'S8'), ('data', 'S24')],
                                   shape=h['num_ext_header'])
        # this is for debugging
        ext_header = {}
        for code, dt_ext in ext_nev_header_codes.items():
            sel = raw_ext_header['code'] == code
            ext_header[code] = raw_ext_header[sel].view(dt_ext)

        # channel label
        neuelbl_header = ext_header['NEUEVLBL']
        channel_labels = dict(
            zip(neuelbl_header['channel_id'], neuelbl_header['channel_label']))

        # TODO ext_header['DIGLABEL']: is there only one label? (there is no id in that case)
        # TODO ECOMMENT + CCOMMENT for annotations
        # TODO NEUEVFLT for annotations

        # read data packet and markers
        dt0 = [
            ('samplepos', 'uint32'),
            ('id', 'uint16'),
            ('value', 'S{}'.format(h['packet_size'] - 6)),
        ]
        data = np.memmap(filename_nev, offset=h['header_size'], dtype=dt0)
        all_ids = np.unique(data['id'])

        t_start = 0 * pq.s
        t_stop = data['samplepos'][-1] / sr * pq.s

        # read events (digital + analog + comments)
        def create_event_array_trig_or_analog(selection, name, labelmode=None):
            if lazy:
                times = []
                labels = np.array([], dtype='S')
            else:
                times = data_trigger['samplepos'][selection].astype(float) / sr
                if labelmode == 'digital_port':
                    labels = data_trigger['digital_port'][selection].astype(
                        'S2')
                elif labelmode is None:
                    labels = None
            ev = EventArray(times=times * pq.s, labels=labels, name=name)
            if lazy:
                ev.lazy_shape = np.sum(selection)
            seg.eventarrays.append(ev)

        mask = (data['id'] == 0)
        dt_trig = [
            ('samplepos', 'uint32'),
            ('id', 'uint16'),
            ('reason', 'uint8'),
            ('reserved0', 'uint8'),
            ('digital_port', 'uint16'),
            ('reserved1', 'S{}'.format(h['packet_size'] - 10)),
        ]
        data_trigger = data.view(dt_trig)[mask]
        # Digital Triggers (PacketID 0)
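        # bit 0 of the 'reason' byte flags a digital input trigger;
        # bits 1..5 flag the analog triggers handled below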
        is_digital = (data_trigger['reason'] & 1) > 0
        create_event_array_trig_or_analog(is_digital,
                                          'Digital trigger',
                                          labelmode='digital_port')

        # Analog Triggers (PacketID 0)
        if version in ['2.1', '2.2']:
            for i in range(5):
                is_analog = (data_trigger['reason'] & (2**(i + 1))) > 0
                create_event_array_trig_or_analog(
                    is_analog, 'Analog trigger {}'.format(i), labelmode=None)

        # Comments
        mask = (data['id'] == 0xFFF)
        dt_comments = [
            ('samplepos', 'uint32'),
            ('id', 'uint16'),
            ('charset', 'uint8'),
            ('reserved0', 'uint8'),
            ('color', 'uint32'),
            ('comment', 'S{}'.format(h['packet_size'] - 12)),
        ]
        data_comments = data.view(dt_comments)[mask]
        if data_comments.size > 0:
            if lazy:
                times = []
                labels = []
            else:
                times = data_comments['samplepos'].astype(float) / sr
                labels = data_comments['comment'].astype('S')
            ev = EventArray(times=times * pq.s, labels=labels, name='Comments')
            if lazy:
                ev.lazy_shape = data_comments.size
            seg.eventarrays.append(ev)

        # READ Spike channel
        channel_ids = all_ids[(all_ids > 0) & (all_ids <= 2048)]

        # get the dtype of the waveforms (this is needlessly complicated)
        if nev_header['additionnal_flag'] & 0x1:
            #dtype_waveforms = { k:'int16' for k in channel_ids }
            dtype_waveforms = dict((k, 'int16') for k in channel_ids)
        else:
            # there is a code, electrode by electrode, giving the appropriate dtype
            neuewav_header = ext_header['NEUEVWAV']
            dtype_waveform = dict(
                zip(neuewav_header['channel_id'],
                    neuewav_header['num_bytes_per_waveform']))
            dtypes_conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
            #dtype_waveforms = { k:dtypes_conv[v] for k,v in dtype_waveform.items() }
            dtype_waveforms = dict(
                (k, dtypes_conv[v]) for k, v in dtype_waveform.items())

        dt2 = [
            ('samplepos', 'uint32'),
            ('id', 'uint16'),
            ('cluster', 'uint8'),
            ('reserved0', 'uint8'),
            ('waveform', 'uint8', (h['packet_size'] - 8, )),
        ]
        data_spike = data.view(dt2)
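        # the waveform payload stays as raw uint8 here; it is re-viewed with
        # the per-channel dtype (dtype_waveforms) inside the loop below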

        for channel_id in channel_ids:
            data_spike_chan = data_spike[data['id'] == channel_id]
            cluster_ids = np.unique(data_spike_chan['cluster'])
            for cluster_id in cluster_ids:
                if cluster_id == 0:
                    name = 'unclassified'
                elif cluster_id == 255:
                    name = 'noise'
                else:
                    name = 'Cluster {}'.format(cluster_id)
                name = 'Channel {} '.format(channel_id) + name

                data_spike_chan_clus = data_spike_chan[
                    data_spike_chan['cluster'] == cluster_id]
                n_spike = data_spike_chan_clus.size
                waveforms, w_sampling_rate, left_sweep = None, None, None
                if lazy:
                    times = []
                else:
                    times = data_spike_chan_clus['samplepos'].astype(
                        float) / sr
                    if load_waveforms:
                        dtype_waveform = dtype_waveforms[channel_id]
                        waveform_size = ((h['packet_size'] - 8) //
                                         np.dtype(dtype_waveform).itemsize)
                        waveforms = data_spike_chan_clus['waveform'].flatten(
                        ).view(dtype_waveform)
                        waveforms = waveforms.reshape(n_spike, 1,
                                                      waveform_size)
                        waveforms = waveforms * pq.uV
                        w_sampling_rate = wsr * pq.Hz
                        left_sweep = waveform_size // 2 / sr * pq.s
                st = SpikeTrain(times=times * pq.s,
                                name=name,
                                t_start=t_start,
                                t_stop=t_stop,
                                waveforms=waveforms,
                                sampling_rate=w_sampling_rate,
                                left_sweep=left_sweep)
                st.annotate(channel_index=int(channel_id))
                if lazy:
                    st.lazy_shape = n_spike
                seg.spiketrains.append(st)
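
# --- A minimal standalone sketch (not part of the IO above) of the packet
# decoding pattern it relies on: map fixed-size records with one structured
# dtype, then re-view the raw payload bytes as the real sample dtype and
# reshape to (n_spike, 1, waveform_size). All names and sizes here are
# illustrative assumptions, not values from any real NEV file.
import numpy as np

packet_size = 32                       # bytes per (fake) packet
n_spike = 4
dt = [('samplepos', 'uint32'), ('id', 'uint16'),
      ('cluster', 'uint8'), ('reserved0', 'uint8'),
      ('waveform', 'uint8', (packet_size - 8,))]
raw = np.zeros(n_spike, dtype=dt)
raw['samplepos'] = np.arange(n_spike) * 100
raw['waveform'] = np.random.randint(0, 255, (n_spike, packet_size - 8))

dtype_waveform = 'int16'               # would come from dtype_waveforms[channel_id]
waveform_size = (packet_size - 8) // np.dtype(dtype_waveform).itemsize
waveforms = raw['waveform'].flatten().view(dtype_waveform)
waveforms = waveforms.reshape(n_spike, 1, waveform_size)
print(waveforms.shape)                 # -> (4, 1, 12)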
Example no. 31
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)
        
        #elif sys.platform.startswith('darwin'):
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1: # CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
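                    # ns_GetAnalogData may return fewer samples than requested
                    # (pdwContCount reports the actual number), so loop until
                    # the whole entity has been read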
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer  = ctypes.create_string_buffer(" "*256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    sptr = SpikeTrain([ ]*pq.s, name = str(entityInfo.szEntityLabel), t_stop = 0.*pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
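                    # each call to ns_GetSegmentData fills pData with one snippet;
                    # the flat buffer is reshaped to (nsample, nsource) and
                    # transposed so each row holds one source's waveform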
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times = pq.Quantity(times, units = 's', copy = False),
                                        t_stop = times.max(),
                                        waveforms = pq.Quantity(waveforms, units = str(pdwSegmentInfo.szUnits), copy = False ),
                                        left_sweep = nsample/2./float(pdwSegmentInfo.dSampleRate)*pq.s,
                                        sampling_rate = float(pdwSegmentInfo.dSampleRate)*pq.Hz,
                                        name = str(entityInfo.szEntityLabel),
                                        )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
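
# --- A standalone sketch (hedged, no real DLL loaded) of the ctypes calling
# pattern used throughout read_segment above: outputs come back through
# pointers, so Python pre-allocates buffers, passes them with ctypes.byref()
# or .ctypes.data_as(), and reads the filled values afterwards. Here
# ctypes.memmove stands in for a library call such as ns_GetAnalogData.
import ctypes
import numpy as np

pdwContCount = ctypes.c_uint32(0)          # output parameter, passed by reference
pData = np.zeros(8, dtype='float64')       # numpy buffer handed to C as double*
ptr = pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double))

# a real call would look like:
# neuroshare.ns_GetAnalogData(hFile, dwEntityID, start, count,
#                             ctypes.byref(pdwContCount), ptr)
fake_result = (ctypes.c_double * 8)(*range(8))
ctypes.memmove(ptr, fake_result, ctypes.sizeof(fake_result))
pdwContCount.value = 8

print(pdwContCount.value, pData)           # -> 8 [0. 1. 2. ... 7.]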
Example no. 32
    def read_segment(self, blockname=None, lazy=False, cascade=True, sortname=''):
        """
        Read a single segment from the tank. Note that TDT blocks are Neo
        segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
        refers to the TDT block's name, which will be the Neo segment name.
        sortname is used to specify an external sortcode generated by offline
        spike sorting. If sortname=='PLX', there should be a ./sort/PLX/*.SortResult
        file in the TDT block, which stores the sortcode for every spike; the
        default '' uses the original online sort.
        """
        if not blockname:
            blockname = os.listdir(self.dirname)[0]

        if blockname == 'TempBlk': return None

        if not self.is_tdtblock(blockname): return None    # if not a tdt block

        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir): return None

        seg = Segment(name=blockname)

        tankname = os.path.basename(self.dirname)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
        dt = [('size','int32'),
                    ('evtype','int32'),
                    ('code','S4'),
                    ('channel','uint16'),
                    ('sortcode','uint16'),
                    ('timestamp','float64'),
                    ('eventoffset','int64'),
                    ('dataformat','int32'),
                    ('frequency','float32'),
                ]
        tsq = np.fromfile(tsq_filename, dtype=dt)
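        # each TSQ record is a fixed-size index entry: 'eventoffset' points to
        # the data chunk in the TEV/SEV file and 'size' gives its length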

        # 0x8801: 'EVTYPE_MARK' gives the global t_start
        global_t_start = tsq[tsq['evtype']==0x8801]['timestamp'][0]

        #TEV is the old data file
        try:
            tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
            #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        except IOError:
            tev_filename = None


        # if an external sortcode exists in ./sort/[sortname]/*.SortResult
        # (generated by offline sorting), use it
        sortresult_filename = None
        if sortname != '':
            try:
                for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                    if file.endswith(".SortResult"):
                        sortresult_filename = os.path.join(subdir, 'sort', sortname, file)

                        # get new sortcode
                        newsortcode = np.fromfile(sortresult_filename, 'int8')[1024:]  # the first 1024 bytes are the file header
                        # update the sort code with the info from this file
                        tsq['sortcode'][1:-1] = newsortcode
                        # print('sortcode updated')
                        break
            except OSError:
                sortresult_filename = None
            except IOError:
                sortresult_filename = None


        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype']==type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code']==code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel']==channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [ ]*pq.s
                            labels = np.array([ ], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                        ea = Event(times=times,
                                   name=code,
                                   channel_index=int(channel),
                                   labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.events.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode']==sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0]-10
                            if lazy:
                                times = [ ]*pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(tsq[mask4]['size'],tsq[mask4]['eventoffset'], tev_array).view(dt)
                                waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                             #   t_start = (tsq['timestamp'][0] - global_t_start) * pq.s # this should work but does not
                                t_start = 0 *pq.s
                                t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s

                            else:
                                t_start = 0 *pq.s
                                t_stop = 0 *pq.s
                            st = SpikeTrain(times           = times,
                                            name            = 'Chan{0} Code{1}'.format(channel,sortcode),
                                            t_start         = t_start,
                                            t_stop          = t_stop,
                                            waveforms       = waveforms,
                                            left_sweep      = waveformsize/2./sr * pq.s,
                                            sampling_rate   = sr * pq.Hz,
                                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size']-10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = [ ]
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(channel)+'.sev')
                            try:
                                #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename, dtype='uint8')
                            except IOError:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],tsq[mask3]['eventoffset'],  sig_array).view(dt)

                        anasig = AnalogSignal(signal        = signal* pq.V,
                                              name          = '{0} {1}'.format(code, channel),
                                              sampling_rate = sr * pq.Hz,
                                              t_start       = (tsq[mask3]['timestamp'][0] - global_t_start) * pq.s,
                                              channel_index = int(channel)
                                              )
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)
        return seg
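
# --- A standalone sketch of the TSQ indexing pattern above, on toy data:
# the whole index is read as one structured array, then nested boolean masks
# group the records by event type, code and channel. The field names match
# the reader above; the evtype values and records are made-up assumptions.
import numpy as np

dt = [('evtype', 'int32'), ('code', 'S4'),
      ('channel', 'uint16'), ('timestamp', 'float64')]
tsq = np.array([(0x8801, b'MARK', 0, 10.0),
                (0x8101, b'Tick', 1, 11.0),
                (0x8101, b'Tick', 1, 12.0),
                (0x8101, b'Tick', 2, 12.5)], dtype=dt)

# 0x8801 plays the role of EVTYPE_MARK: it gives the global t_start
global_t_start = tsq[tsq['evtype'] == 0x8801]['timestamp'][0]

mask1 = tsq['evtype'] == 0x8101
for code in np.unique(tsq[mask1]['code']):
    mask2 = mask1 & (tsq['code'] == code)
    for channel in np.unique(tsq[mask2]['channel']):
        mask3 = mask2 & (tsq['channel'] == channel)
        print(code, channel, tsq[mask3]['timestamp'] - global_t_start)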