Code example #1
File: exampleio.py Project: CINPLA/python-neo
    def read_analogsignal(self,
                          # the first 2 keyword arguments are imposed by the neo.io API
                          lazy=False,
                          cascade=True,
                          channel_index=0,
                          segment_duration=15.,
                          t_start=-1,
                          ):
        """
        With this IO an AnalogSignal can be accessed directly by its
        channel number.
        """
        sr = 10000.
        sinus_freq = 3.  # Hz
        # time vector for the generated signal:
        tvect = np.arange(t_start, t_start + segment_duration, 1. / sr)

        if lazy:
            anasig = AnalogSignal([], units='V', sampling_rate=sr * pq.Hz,
                                  t_start=t_start * pq.s,
                                  channel_index=channel_index)
            # we add the attribute lazy_shape with the size it would have if loaded
            anasig.lazy_shape = tvect.shape
        else:
            # create the analog signal (a 3 Hz sinusoid plus noise)
            sig = np.sin(2 * np.pi * tvect * sinus_freq
                         + channel_index / 5. * 2 * np.pi) + np.random.rand(tvect.size)
            anasig = AnalogSignal(sig, units='V', sampling_rate=sr * pq.Hz,
                                  t_start=t_start * pq.s,
                                  channel_index=channel_index)

        # for attributes outside neo you can annotate
        anasig.annotate(info='it is a sinus of %f Hz' % sinus_freq)

        return anasig
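
For reference, the same pattern outside any IO class — a minimal, self-contained sketch (illustrative only, not taken from either project):

import numpy as np
import quantities as pq
from neo.core import AnalogSignal

sr = 10000.                                    # sampling rate in Hz
t = np.arange(0., 1., 1. / sr)                 # 1 s time vector
sig = np.sin(2 * np.pi * 3. * t)               # 3 Hz sinusoid
anasig = AnalogSignal(sig, units='V',
                      sampling_rate=sr * pq.Hz,
                      t_start=0. * pq.s)
anasig.annotate(info='it is a sinus of 3 Hz')  # free-form metadata
print(anasig.annotations['info'])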
Code example #2
File: kwikio.py Project: jakirkham/python-neo
 def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):
     """
     Read raw traces
     Arguments:
         channel_index: must be integer array
     """
     if self._attrs["app_data"]:
         bit_volts = self._attrs["app_data"]["channel_bit_volts"]
         sig_unit = "uV"
     else:
         bit_volts = np.ones((self._attrs["shape"][1]))  # TODO: find conversion in phy generated files
         sig_unit = "bit"
     if lazy:
         anasig = AnalogSignal(
             [],
             units=sig_unit,
             sampling_rate=self._attrs["kwik"]["sample_rate"] * pq.Hz,
             t_start=self._attrs["kwik"]["start_time"] * pq.s,
         )
         # we add the attribute lazy_shape with the size it would have if loaded
         anasig.lazy_shape = self._attrs["shape"][0]
     else:
         data = self._kwd["recordings"][str(self._dataset)]["data"][:, channel_index]
         data = data * bit_volts[channel_index]
         anasig = AnalogSignal(
             data,
             units=sig_unit,
             sampling_rate=self._attrs["kwik"]["sample_rate"] * pq.Hz,
             t_start=self._attrs["kwik"]["start_time"] * pq.s,
         )
         data = []  # delete from memory
     # for attributes outside neo you can annotate
     anasig.annotate(info="raw traces")
     return anasig
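
The bit-to-volt conversion above is a plain element-wise scale per channel; a small numpy-only sketch of the same step (all values made up):

import numpy as np

raw = np.array([[100, -200], [50, 25]], dtype=np.int16)  # ADC counts, 2 channels
bit_volts = np.array([0.195, 0.195])                     # uV per bit, per channel
scaled = raw * bit_volts                                 # broadcasts over channels
print(scaled)                                            # values now in microvolts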
Code example #3
File: neuroshareapiio.py Project: bal47/python-neo
    def read_analogsignal(self,
                          # the first 2 keyword arguments are imposed by neo.io
                          lazy=False,
                          cascade=True,
                          # channel index as given by the neuroshare API
                          channel_index=0,
                          # time in seconds to be read
                          segment_duration=0.,
                          # time in seconds to start reading from
                          t_start=0.,
                          ):

        # some controls:
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])

        if lazy:
            anasig = AnalogSignal([], units="V",
                                  sampling_rate=self.metadata["sampRate"] * pq.Hz,
                                  t_start=t_start * pq.s,
                                  )
            # create a dummy time vector
            tvect = np.arange(t_start, t_start + segment_duration,
                              1. / self.metadata["sampRate"])
            # we add the attribute lazy_shape with the size it would have if loaded
            anasig.lazy_shape = tvect.shape
        else:
            # get the analog object
            sig = self.fd.get_entity(channel_index)
            # get the units (V, mV etc)
            sigUnits = sig.units
            # get the electrode number
            chanName = sig.label[-4:]

            # transform t_start into an index (reading will start from this index)
            startat = int(t_start * self.metadata["sampRate"])
            # get the number of bins to read in
            bins = int(segment_duration * self.metadata["sampRate"])

            # if the number of bins to read is bigger than
            # the total number of bins, read only till the end of the analog object
            if startat + bins > sig.item_count:
                bins = sig.item_count - startat
            # read the data from the sig object
            sig, _, _ = sig.get_data(index=startat, count=bins)
            # store it in the 'AnalogSignal' object
            anasig = AnalogSignal(sig, units=sigUnits,
                                  sampling_rate=self.metadata["sampRate"] * pq.Hz,
                                  t_start=t_start * pq.s,
                                  t_stop=(t_start + segment_duration) * pq.s,
                                  channel_index=channel_index)

            # annotate which electrode the signal comes from
            anasig.annotate(info="signal from channel %s" % chanName)

        return anasig
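
Converting a start time in seconds into a sample index, as done above, is a multiply-and-truncate; a tiny sketch with made-up numbers:

samp_rate = 20000.0                   # Hz (made up)
t_start = 1.5                         # seconds
startat = int(t_start * samp_rate)    # index of the first sample to read
print(startat)                        # 30000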
Code example #4
File: pynnio.py Project: csmarc/python-neo
    def _extract_signals(self, data, metadata):

        signal = None
        arr = numpy.vstack([self._extract_array(data, channel_index)
                            for channel_index in
                            range(metadata['first_index'], metadata['last_index'] + 1)])
        if len(arr) > 0:
            signal = AnalogSignal(arr.T,
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt'] * pq.ms)
        if signal is not None:
            signal.annotate(label=metadata["label"], variable=metadata["variable"])
        return signal
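
The transpose matters because AnalogSignal expects time on the first axis and channels on the second; a sketch with dummy traces:

import numpy as np
import quantities as pq
from neo.core import AnalogSignal

channels = [np.arange(4.) + 10 * i for i in range(3)]  # 3 dummy traces, 4 samples each
arr = np.vstack(channels)                              # shape (3, 4): channels x time
signal = AnalogSignal(arr.T,                           # shape (4, 3): time x channels
                      units='mV', sampling_period=0.1 * pq.ms)
print(signal.shape)                                    # (4, 3)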
Code example #5
    def read_analogsignal(
        self,
        lazy=False,
        # channel index as given by the neuroshare API
        channel_index=0,
        # time in seconds to be read
        segment_duration=0.,
        # time in seconds to start reading from
        t_start=0.,
    ):
        assert not lazy, 'Do not support lazy'

        # some controls:
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])

        # get the analog object
        sig = self.fd.get_entity(channel_index)
        # get the units (V, mV etc)
        sigUnits = sig.units
        # get the electrode number
        chanName = sig.label[-4:]

        # transform t_start into index (reading will start from this index)
        startat = int(t_start * self.metadata["sampRate"])
        # get the number of bins to read in
        bins = int(segment_duration * self.metadata["sampRate"])

        # if the number of bins to read is bigger than
        # the total number of bins, read only till the end of analog object
        if startat + bins > sig.item_count:
            bins = sig.item_count - startat
        # read the data from the sig object
        sig, _, _ = sig.get_data(index=startat, count=bins)
        # store it to the 'AnalogSignal' object
        anasig = AnalogSignal(sig,
                              units=sigUnits,
                              sampling_rate=self.metadata["sampRate"] * pq.Hz,
                              t_start=t_start * pq.s,
                              t_stop=(t_start + segment_duration) * pq.s,
                              channel_index=channel_index)

        # annotate which electrode the signal comes from
        anasig.annotate(info="signal from channel %s" % chanName)

        return anasig
Code example #6
    def read_analogsignal(
        self,
        channel_index=None,
        lazy=False,
        cascade=True,
    ):
        """
        Read raw traces
        Arguments:
            channel_index: must be integer
        """
        try:
            channel_index = int(channel_index)
        except TypeError:
            raise TypeError('channel_index must be int, not %s' % type(channel_index))

        if self._attrs['app_data']:
            bit_volts = self._attrs['app_data']['channel_bit_volts']
            sig_unit = 'uV'
        else:
            # TODO: find conversion in phy generated files
            bit_volts = np.ones(self._attrs['shape'][1])
            sig_unit = 'bit'
        if lazy:
            anasig = AnalogSignal(
                [],
                units=sig_unit,
                sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz,
                t_start=self._attrs['kwik']['start_time'] * pq.s,
                channel_index=channel_index,
            )
            # we add the attribute lazy_shape with the size it would have if loaded
            anasig.lazy_shape = self._attrs['shape'][0]
        else:
            data = self._kwd['recordings'][str(
                self._dataset)]['data'][:, channel_index]
            data = data * bit_volts[channel_index]
            anasig = AnalogSignal(
                data,
                units=sig_unit,
                sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz,
                t_start=self._attrs['kwik']['start_time'] * pq.s,
                channel_index=channel_index,
            )
            data = []  # delete from memory
        # for attributes outside neo you can annotate
        anasig.annotate(info='raw traces')
        return anasig
Code example #7
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))
        cls.populate_dates(blk)

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        cls.populate_dates(seg)
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times,
                                        signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
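
annotate accepts arbitrary keyword arguments, which is why a dictionary can be splatted straight in with **, as done throughout this helper; a minimal sketch:

from neo.core import Block

blk = Block()
meta = {'experimenter': 'A. Example', 'temperature': 36.5}   # made-up metadata
blk.annotate(**meta)   # same as blk.annotate(experimenter=..., temperature=36.5)
print(blk.annotations['temperature'])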
Code example #8
    def test_annotations(self):
        self.testfilename = self.get_local_path('nix/nixio_fr_ann.nix')
        with NixIO(filename=self.testfilename, mode='ow') as io:
            annotations = {'my_custom_annotation': 'hello block'}
            bl = Block(**annotations)
            annotations = {'something': 'hello hello000'}
            seg = Segment(**annotations)
            an = AnalogSignal([[1, 2, 3], [4, 5, 6]],
                              units='V',
                              sampling_rate=1 * pq.Hz)
            an.annotate(ansigrandom='hello chars')
            an.array_annotate(custom_id=[1, 2, 3])
            sp = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
            sp.annotations['railway'] = 'hello train'
            ev = Event(np.arange(0, 30, 10) * pq.Hz,
                       labels=np.array(['trig0', 'trig1', 'trig2'], dtype='U'))
            ev.annotations['venue'] = 'hello event'
            ev2 = Event(np.arange(0, 30, 10) * pq.Hz,
                        labels=np.array(['trig0', 'trig1', 'trig2'],
                                        dtype='U'))
            ev2.annotations['evven'] = 'hello ev'
            seg.spiketrains.append(sp)
            seg.events.append(ev)
            seg.events.append(ev2)
            seg.analogsignals.append(an)
            bl.segments.append(seg)
            io.write_block(bl)
            io.close()

        with NixIOfr(filename=self.testfilename) as frio:
            frbl = frio.read_block()
            assert 'my_custom_annotation' in frbl.annotations
            assert 'something' in frbl.segments[0].annotations
            assert 'ansigrandom' in frbl.segments[0].analogsignals[
                0].annotations
            assert 'railway' in frbl.segments[0].spiketrains[0].annotations
            assert 'venue' in frbl.segments[0].events[0].annotations
            assert 'evven' in frbl.segments[0].events[1].annotations
            assert 'custom_id' in frbl.segments[0].analogsignals[
                0].array_annotations
            for anasig in frbl.segments[0].analogsignals:
                for value in anasig.array_annotations.values():
                    assert anasig.shape[-1] == len(value)
        os.remove(self.testfilename)
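
The final loop asserts neo's rule that an array annotation carries one value per channel (the last axis of the signal); a sketch of that constraint:

import quantities as pq
from neo.core import AnalogSignal

an = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='V', sampling_rate=1 * pq.Hz)
print(an.shape)                          # (2, 3): 2 samples, 3 channels
an.array_annotate(custom_id=[1, 2, 3])   # one entry per channel
assert len(an.array_annotations['custom_id']) == an.shape[-1]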
Code example #9
File: kwikio.py Project: bal47/python-neo
    def read_analogsignal(self,
                          channel_index=None,
                          lazy=False,
                          cascade=True,
                          ):
        """
        Read raw traces
        Arguments:
            channel_index: must be integer
        """
        try:
            channel_index = int(channel_index)
        except TypeError:
            raise TypeError('channel_index must be int, not %s' % type(channel_index))

        if self._attrs['app_data']:
            bit_volts = self._attrs['app_data']['channel_bit_volts']
            sig_unit = 'uV'
        else:
            # TODO: find conversion in phy generated files
            bit_volts = np.ones(self._attrs['shape'][1])
            sig_unit = 'bit'
        if lazy:
            anasig = AnalogSignal([],
                                  units=sig_unit,
                                  sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz,
                                  t_start=self._attrs['kwik']['start_time'] * pq.s,
                                  channel_index=channel_index,
                                  )
            # we add the attribute lazy_shape with the size it would have if loaded
            anasig.lazy_shape = self._attrs['shape'][0]
        else:
            data = self._kwd['recordings'][str(self._dataset)]['data'][:, channel_index]
            data = data * bit_volts[channel_index]
            anasig = AnalogSignal(data,
                                  units=sig_unit,
                                  sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz,
                                  t_start=self._attrs['kwik']['start_time'] * pq.s,
                                  channel_index=channel_index,
                                  )
            data = []  # delete from memory
        # for attributes outside neo you can annotate
        anasig.annotate(info='raw traces')
        return anasig
Code example #10
File: pynnio.py Project: CINPLA/python-neo
    def _extract_signals(self, data, metadata, lazy):

        signal = None
        if lazy and data.size > 0:
            signal = AnalogSignal([],
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt']*pq.ms)
            signal.lazy_shape = None
        else:
            arr = numpy.vstack([self._extract_array(data, channel_index)
                                for channel_index in range(metadata['first_index'],
                                                           metadata['last_index'] + 1)])
            if len(arr) > 0:
                signal = AnalogSignal(arr.T,
                                      units=self._determine_units(metadata),
                                      sampling_period=metadata['dt']*pq.ms)
        if signal is not None:
            signal.annotate(label=metadata["label"],
                            variable=metadata["variable"])
        return signal
Code example #11
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Code example #12
File: pynnio.py Project: bal47/python-neo
 def _extract_signal(self, data, metadata, channel_index, lazy):
     signal = None
     if lazy:
         if channel_index in data[:, 1]:
             signal = AnalogSignal([],
                                   units=self._determine_units(metadata),
                                   sampling_period=metadata['dt']*pq.ms,
                                   channel_index=channel_index)
             signal.lazy_shape = None
     else:
         arr = self._extract_array(data, channel_index)
         if len(arr) > 0:
             signal = AnalogSignal(arr,
                                   units=self._determine_units(metadata),
                                   sampling_period=metadata['dt']*pq.ms,
                                   channel_index=channel_index)
     if signal is not None:
         signal.annotate(label=metadata["label"],
                         variable=metadata["variable"])
     return signal
Code example #13
File: heka_io.py Project: psilentp/Analysis
def gettrace(trec, f):
    import numpy as np
    format_type_lengths = [2, 4, 4, 8]
    format_type = [np.int16, np.int32, np.float32, np.float64]
    pointsize = format_type_lengths[int(trec.trDataFormat)]
    dtype = format_type[int(trec.trDataFormat)]
    f.seek(int(trec.trData))
    byte_string = f.read(int(trec.trDataPoints) * pointsize)
    ydata = np.frombuffer(byte_string, dtype=dtype)
    tunit = pq.Quantity(1, str(trec.trXUnit))
    yunit = pq.Quantity(1, str(trec.trYUnit))
    sig = AnalogSignal(ydata * float(trec.trDataScaler) * yunit,
                       sampling_period=float(trec.trXInterval) * tunit,
                       units=trec.trYUnit[0])
    annotations = list(trec.__dict__.keys())
    annotations.remove('readlist')
    for a in annotations:
        d = {a: str(trec.__dict__[a])}
        sig.annotate(**d)
    return sig
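
The dtype table above maps HEKA's trDataFormat code to a numpy type before decoding the raw bytes; a self-contained sketch of that decode step (the byte string here is fabricated):

import numpy as np

format_type = [np.int16, np.int32, np.float32, np.float64]
data_format = 2                                   # pretend trDataFormat == 2 -> float32
byte_string = np.arange(4, dtype=np.float32).tobytes()
ydata = np.frombuffer(byte_string, dtype=format_type[data_format])
print(ydata)                                      # [0. 1. 2. 3.]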
Code example #14
    def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):
        """
        Read raw traces
        Arguments:
            channel_index: must be integer
        """
        try:
            channel_index = int(channel_index)
        except TypeError:
            raise TypeError('channel_index must be int, not %s' % type(channel_index))

        bit_volts = self._attrs['app_data']['channel_bit_volts']
        sig_unit = 'uV'
        if lazy:
            anasig = AnalogSignal(
                [],
                units=sig_unit,
                sampling_rate=self._attrs['kwe']['sample_rate'] * pq.Hz,
                t_start=self._attrs['kwe']['start_time'] * pq.s,
                channel_index=channel_index,
                name=self._nodes['Rhythm FPGA'][0]['chanNames'][channel_index])
            # we add the attribute lazy_shape with the size it would have if loaded
            anasig.lazy_shape = self._attrs['shape'][0]
        else:
            data = self._kwd['recordings'][str(
                self._dataset)]['data'][:, channel_index]
            data = data * bit_volts[channel_index]
            anasig = AnalogSignal(
                data,
                units=sig_unit,
                sampling_rate=self._attrs['kwe']['sample_rate'] * pq.Hz,
                t_start=self._attrs['kwe']['start_time'] * pq.s,
                channel_index=channel_index,
                name=self._nodes['Rhythm FPGA'][0]['chanNames'][channel_index])
            data = []  # delete from memory
        # for attributes outside neo you can annotate
        anasig.annotate(info='raw trace')
        return anasig
Code example #15
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)

        #elif sys.platform.startswith('darwin'):

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  # TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            #segment
            if entity_types[
                    entityInfo.
                    dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[
                            dwIndex, :, :] = pData[:nsample * nsource].reshape(
                                nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(
                                                  pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
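
Several of these IOs share the same lazy convention: build an empty signal and record the shape the data would have in an ad-hoc lazy_shape attribute. A sketch of just that convention (it assumes an older neo where IOs set this attribute by hand):

import quantities as pq
from neo.core import AnalogSignal

anasig = AnalogSignal([], units='V', sampling_rate=10000 * pq.Hz,
                      t_start=0 * pq.s)
anasig.lazy_shape = (150000,)            # shape the data would have if loaded
print(len(anasig), anasig.lazy_shape)    # 0 (150000,)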
Code example #16
File: micromedio.py Project: xiki-tempula/python-neo
    def read_segment(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        # Name
        f.seek(64, 0)
        surname = f.read(22).rstrip(' ')
        firstname = f.read(20).rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(
            name=firstname + ' ' + surname,
            file_origin=os.path.basename(self.filename),
        )
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176, 0)
        zone_names = [
            'ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E',
            'MONTAGE', 'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
            'EVENT B', 'TRIGGER'
        ]
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {
            -1: pq.nano * pq.V,
            0: pq.uV,
            1: pq.mV,
            2: 1,
            100: pq.percent,
            101: pq.dimensionless,
            102: pq.dimensionless
        }

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
                'iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max -
                               physical_min) / float(logical_max -
                                                     logical_min + 1)
                signal = (rawdata[:, c].astype('f') -
                          logical_ground) * factor * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  name=label,
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground=ground)

            seg.analogsignals.append(anaSig)

        sampling_rate = np.mean(
            [anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(
                f.read(length),
                dtype=[('pos', 'u4'), ('label', label_dtype)],
            )
            ea = EventArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (triggers['pos'] !=
                                                           0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos'] / sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[
                                       ('label', 'u4'),
                                       ('start', 'u4'),
                                       ('stop', 'u4'),
                                   ])
            ep = EpochArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (epochs['stop'] <
                                                           rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start'] / sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start']) /
                                sampling_rate).rescale('s')
            else:
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)

        seg.create_many_to_one_relationship()
        return seg
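
The per-channel calibration above is an affine map from logical ADC counts to physical units; a sketch with made-up bounds:

import numpy as np
import quantities as pq

logical_min, logical_max, logical_ground = 0, 65535, 32768   # made-up 16-bit bounds
physical_min, physical_max = -3200.0, 3200.0                 # made-up range in uV
factor = float(physical_max - physical_min) / float(logical_max - logical_min + 1)
raw = np.array([32768, 33768, 31768], dtype='f')             # fabricated samples
signal = (raw - logical_ground) * factor * pq.uV
print(signal)   # [0. 97.65625 -97.65625] uV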
Code example #17
    def read_block(
            self,
            # the 2 first keyword arguments are imposed by neo.io API
            lazy=False,
            cascade=True):
        """
        Return a Block.

        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 take into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0  # position of the current block in the file
        file_blocks = []  # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(
                fid, dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
            block.update({
                'm_length': m_length,
                'm_TypeBlock': m_TypeBlock,
                'pos': pos_block
            })
            file_blocks.append(block)

        else:  # cascade == True

            seg = Segment(file_origin=os.path.basename(self.filename))
            seg.file_origin = os.path.basename(self.filename)
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(
                    fid, dict_header_type.get(m_TypeBlock,
                                              Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that the 'm_isAnalog'
                    # is used to distinguished analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Modes' = 128 or 192. So I
                    # decided to consider every channel with 'm_Modes'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 are really for level and
                    # external trigger as I had no test files containing data
                    # of these types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode'])+ ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) +')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = []  # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = []  # list of lists of indexes of data blocks
            # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)

            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                        file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0  # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l'  # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form, buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read assuming that they
                # are all contiguous and sorted in time. I don't know if that's
                # always the case. Maybe we should use the time stamp of each
                # data block to choose where to put the read data in the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                            file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos'] + 6)
                        temp_array[ind:ind + count] = \
                            np.fromfile(fid, dtype=np.int16, count=count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal(
                        [],
                        sampling_rate=sampling_rate,
                        t_start=t_start,
                        name=file_blocks[list_chan[ind_chan]]['m_Name'],
                        file_origin=os.path.basename(self.filename),
                        units=pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(
                        temp_array,
                        sampling_rate=sampling_rate,
                        t_start=t_start,
                        name=file_blocks[list_chan[ind_chan]]['m_Name'],
                        file_origin=os.path.basename(self.filename),
                        units=pq.dimensionless)

                ana_sig.channel_index = \
                    file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(
                    channel_name=file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(
                    channel_type=file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
Code example #18
File: sta.py Project: INM-6/elephant
def spike_triggered_average(signal, spiketrains, window):
    """
    Calculates the spike-triggered averages of analog signals in a time window
    relative to the spike times of a corresponding spiketrain, for multiple
    signals at once. The function receives n analog signals and either one or
    n spiketrains. If a single spiketrain is given, it is multiplied n-fold
    and used for each of the n analog signals.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' contains n analog signals.
    spiketrains : one SpikeTrain or one numpy ndarray or a list of n of either of these.
        'spiketrains' contains the times of the spikes in the spiketrains.
    window : tuple of 2 Quantity objects with dimensions of time.
        'window' is the start time and the stop time, relative to a spike, of
        the time interval for signal averaging.
        If the window size is not a multiple of the sampling interval of the 
        signal the window will be extended to the next multiple. 

    Returns
    -------
    result_sta : neo AnalogSignal object
        'result_sta' contains the spike-triggered averages of each of the
        analog signals with respect to the spikes in the corresponding
        spiketrains. The length of 'result_sta' is calculated as the number
        of bins from the given start and stop time of the averaging interval
        and the sampling rate of the analog signal. If for an analog signal
        no spike was either given or all given spikes had to be ignored
        because of a too large averaging interval, the corresponding returned
        analog signal has all entries as nan. The number of used spikes and
        unused spikes for each analog signal are returned as annotations to 
        the returned AnalogSignal object.

    Examples
    --------

    >>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
    ...                                sampling_rate=10/ms)
    >>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
    ...                                 (-5 * ms, 10 * ms))

    """

    # checking compatibility of data and data types
    # window_starttime: time to specify the start time of the averaging
    # interval relative to a spike
    # window_stoptime: time to specify the stop time of the averaging
    # interval relative to a spike
    window_starttime, window_stoptime = window
    if not (isinstance(window_starttime, pq.quantity.Quantity) and
            window_starttime.dimensionality.simplified ==
            pq.Quantity(1, "s").dimensionality):
        raise TypeError("The start time of the window (window[0]) "
                        "must be a time quantity.")
    if not (isinstance(window_stoptime, pq.quantity.Quantity) and
            window_stoptime.dimensionality.simplified ==
            pq.Quantity(1, "s").dimensionality):
        raise TypeError("The stop time of the window (window[1]) "
                        "must be a time quantity.")
    if window_stoptime <= window_starttime:
        raise ValueError("The start time of the window (window[0]) must be "
                         "earlier than the stop time of the window (window[1]).")

    # checks on signal
    if not isinstance(signal, AnalogSignal):
        raise TypeError(
            "Signal must be an AnalogSignal, not %s." % type(signal))
    if len(signal.shape) > 1:
        # num_signals: number of analog signals
        num_signals = signal.shape[1]
    else:
        raise ValueError("Empty analog signal, hence no averaging possible.")
    if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
        raise ValueError("The chosen time window is larger than the "
                         "time duration of the signal.")

    # spiketrains type check
    if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
        spiketrains = [spiketrains]
    elif isinstance(spiketrains, list):
        for st in spiketrains:
            if not isinstance(st, (np.ndarray, SpikeTrain)):
                raise TypeError(
                    "spiketrains must be a SpikeTrain, a numpy ndarray, or a "
                    "list of one of those, not %s." % type(spiketrains))
    else:
        raise TypeError(
            "spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
            "one of those, not %s." % type(spiketrains))

    # multiplying spiketrain in case only a single spiketrain is given
    if len(spiketrains) == 1 and num_signals != 1:
        template = spiketrains[0]
        spiketrains = []
        for i in range(num_signals):
            spiketrains.append(template)

    # checking for matching numbers of signals and spiketrains
    if num_signals != len(spiketrains):
        raise ValueError(
            "The number of signals and spiketrains has to be the same.")

    # checking the times of signal and spiketrains
    for i in range(num_signals):
        if spiketrains[i].t_start < signal.t_start:
            raise ValueError(
                "The spiketrain indexed by %i starts earlier than "
                "the analog signal." % i)
        if spiketrains[i].t_stop > signal.t_stop:
            raise ValueError(
                "The spiketrain indexed by %i stops later than "
                "the analog signal." % i)

    # *** Main algorithm: ***

    # window_bins: number of bins of the chosen averaging interval
    window_bins = int(np.ceil(((window_stoptime - window_starttime) *
        signal.sampling_rate).simplified))
    # result_sta: array containing finally the spike-triggered averaged signal
    result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
        sampling_rate=signal.sampling_rate, units=signal.units)
    # setting of correct times of the spike-triggered average
    # relative to the spike
    result_sta.t_start = window_starttime
    used_spikes = np.zeros(num_signals, dtype=int)
    unused_spikes = np.zeros(num_signals, dtype=int)
    total_used_spikes = 0

    for i in range(num_signals):
        # summing over all respective signal intervals around spiketimes
        for spiketime in spiketrains[i]:
            # checks for sufficient signal data around spiketime
            if (spiketime + window_starttime >= signal.t_start and
                    spiketime + window_stoptime <= signal.t_stop):
                # calculating the startbin in the analog signal of the
                # averaging window for spike
                startbin = int(np.floor(((spiketime + window_starttime -
                    signal.t_start) * signal.sampling_rate).simplified))
                # adds the signal in selected interval relative to the spike
                result_sta[:, i] += signal[
                    startbin: startbin + window_bins, i]
                # counting of the used spikes
                used_spikes[i] += 1
            else:
                # counting of the unused spikes
                unused_spikes[i] += 1

        # normalization
        result_sta[:, i] = result_sta[:, i] / used_spikes[i]

        total_used_spikes += used_spikes[i]

    if total_used_spikes == 0:
        warnings.warn(
            "No spike at all was either found or used for averaging")
    result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)

    return result_sta
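
A minimal usage sketch for the function above (all values are fabricated;
assumes neo, numpy, and quantities are installed):

import numpy as np
import quantities as pq
import neo

# two channels of fake data: 1 s at 1 kHz, t_start defaults to 0 s
signal = neo.AnalogSignal(np.random.randn(1000, 2), units='mV',
                          sampling_rate=1 * pq.kHz)
# one spiketrain per channel; every spike leaves room for the window
st1 = neo.SpikeTrain([0.3, 0.5, 0.7] * pq.s, t_stop=1.0 * pq.s)
st2 = neo.SpikeTrain([0.2, 0.6] * pq.s, t_stop=1.0 * pq.s)

# average from 10 ms before to 20 ms after each spike
sta = spike_triggered_average(signal, [st1, st2],
                              (-10 * pq.ms, 20 * pq.ms))
print(sta.annotations['used_spikes'])   # -> array([3, 2])
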
Code example #21
    def read_analogsignal(
        self,
        # the first 2 keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        #channel index as given by the neuroshare API
        channel_index=0,
        #time in seconds to be read
        segment_duration=0.,
        #time in seconds to start reading from
        t_start=0.,
    ):

        #some controls:
        #if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        #if the segment duration is bigger than file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])

        if lazy:
            anasig = AnalogSignal(
                [],
                units="V",
                sampling_rate=self.metadata["sampRate"] * pq.Hz,
                t_start=t_start * pq.s,
            )
            #create a dummy time vector
            tvect = np.arange(t_start, t_start + segment_duration,
                              1. / self.metadata["sampRate"])
            # we add the attribute lazy_shape with the size if loaded
            anasig.lazy_shape = tvect.shape
        else:
            #get the analog object
            sig = self.fd.get_entity(channel_index)
            #get the units (V, mV etc)
            sigUnits = sig.units
            #get the electrode number
            chanName = sig.label[-4:]

            #transform t_start into index (reading will start from this index)
            startat = int(t_start * self.metadata["sampRate"])
            #get the number of bins to read in
            bins = int(segment_duration * self.metadata["sampRate"])

            #if the number of bins to read is bigger than
            #the total number of bins, read only till the end of analog object
            if startat + bins > sig.item_count:
                bins = sig.item_count - startat
            #read the data from the sig object
            sig, _, _ = sig.get_data(index=startat, count=bins)
            #store it to the 'AnalogSignal' object
            anasig = AnalogSignal(sig,
                                  units=sigUnits,
                                  sampling_rate=self.metadata["sampRate"] *
                                  pq.Hz,
                                  t_start=t_start * pq.s,
                                  t_stop=(t_start + segment_duration) * pq.s,
                                  channel_index=channel_index)

            # annotate from which electrode the signal comes from
            anasig.annotate(info="signal from channel %s" % chanName)

        return anasig
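
The start/stop handling above boils down to converting times into sample
indices and clamping at the end of the data. A standalone sketch of just that
arithmetic, with made-up values:

samp_rate = 10000.0        # Hz, assumed
t_start = 2.0              # s, where reading starts
segment_duration = 5.0     # s, how much to read
item_count = 60000         # total samples available in the entity

startat = int(t_start * samp_rate)        # index of the first sample to read
bins = int(segment_duration * samp_rate)  # number of samples requested
if startat + bins > item_count:           # clamp at the end of the recording
    bins = item_count - startat           # here: 60000 - 20000 = 40000
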
Code example #22
    def read_segment(self, cascade=True, lazy=False, ):
        """
        Arguments:
        """
        f = StructFile(open(self.filename, 'rb'))

        # Name
        f.seek(64, 0)
        surname = f.read(22).decode('ascii').rstrip(' ')
        firstname = f.read(20).decode('ascii').rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=str(firstname + ' ' + surname),
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            f.close()
            return seg

        # area
        f.seek(176, 0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
                      'IMPED_E', 'MONTAGE',
                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
                      'EVENT B', 'TRIGGER']
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((-1, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.frombuffer(f.read(Num_Chan*2), dtype='u2', count=Num_Chan)

        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
                 101: pq.dimensionless, 102: pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip(b"\x00").decode('ascii')
            ground = f.read(6).strip(b"\x00").decode('ascii')
            (logical_min, logical_max, logical_ground, physical_min,
             physical_max) = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max - physical_min) / float(
                    logical_max - logical_min + 1)
                signal = (rawdata[:, c].astype(
                    'f') - logical_ground) * factor * unit

            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   name=str(label), channel_index=c)
            if lazy:
                ana_sig.lazy_shape = None
            ana_sig.annotate(ground=ground)

            seg.analogsignals.append(ana_sig)

        sampling_rate = np.mean(
            [ana_sig.sampling_rate.magnitude
             for ana_sig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(f.read(length), dtype=[('pos', 'u4'), (
                'label', label_dtype)])
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (
                    triggers['pos'] != 0)
                triggers = triggers[keep]
                ea = Event(name=zname[0] + zname[1:].lower(),
                           labels=triggers['label'].astype('S'),
                           times=(triggers['pos'] / sampling_rate).rescale('s'))
            else:
                ea = Event(name=zname[0] + zname[1:].lower())
                ea.lazy_shape = triggers.size
            seg.events.append(ea)

        # Read Event A and B
        # Not so well  tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[('label', 'u4'), ('start', 'u4'),
                                          ('stop', 'u4'), ])
            ep = Epoch(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (
                    epochs['stop'] < rawdata.shape[0])
                epochs = epochs[keep]
                ep = Epoch(name=zname[0] + zname[1:].lower(),
                           labels=epochs['label'].astype('S'),
                           times=(epochs['start'] / sampling_rate).rescale('s'),
                           durations=((epochs['stop'] - epochs['start']) / sampling_rate).rescale('s'))
            else:
                ep = Epoch(name=zname[0] + zname[1:].lower())
                ep.lazy_shape = epochs.size
            seg.epochs.append(ep)

        seg.create_many_to_one_relationship()
        f.close()
        return seg
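
The per-channel conversion above maps stored integer ('logical') values to
physical units via a scale factor and a ground offset. A toy sketch of the
same arithmetic, with assumed ranges:

import numpy as np
import quantities as pq

logical_min, logical_max, logical_ground = -32768, 32767, 0  # assumed 16-bit
physical_min, physical_max = -3200.0, 3200.0                 # assumed, in uV

factor = float(physical_max - physical_min) / float(
    logical_max - logical_min + 1)
raw = np.array([-32768, 0, 32767], dtype='f')
physical = (raw - logical_ground) * factor * pq.uV  # ~ -3200, 0, +3200 uV
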
Code example #23
File: elanio.py Project: CINPLA/python-neo
    def read_segment(self, lazy=False, cascade=True):

        # # Read header file

        f = open(self.filename + '.ent', 'r')
        #version
        version = f.readline()
        if version[:2] != 'V2' and version[:2] != 'V3':
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # strange 2 line for datetime
        #line1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        #line2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except (TypeError, ValueError):
            fulldatetime = None

        seg = Segment(file_origin=os.path.basename(self.filename),
                      elan_version=version,
                      info1=info1,
                      info2=info2,
                      rec_datetime=fulldatetime)

        if not cascade:
            return seg

        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling rate sample
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:, c] - min_logic[c]) / (
                    max_logic[c] - min_logic[c]) * \
                    (max_physic[c] - min_physic[c]) + min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            ana_sig = AnalogSignal(
                sig * unit, sampling_rate=sampling_rate,
                t_start=0. * pq.s, name=labels[c], channel_index=c)
            if lazy:
                ana_sig.lazy_shape = data.shape[0]
            ana_sig.annotate(channel_name=labels[c])
            seg.analogsignals.append(ana_sig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = Event(times=times, labels=labels, reject_codes=reject_codes)
        if lazy:
            ea.lazy_shape = len(times)
        seg.events.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
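
Each line of the .pos file holds three integer columns (sample index, label,
reject code) that the loop above parses with a regular expression. A toy
sketch on one fabricated line, assuming a 512 Hz sampling rate:

import re

line = '  1024   10   0 '   # fabricated: sample_index, label, reject_code
sample_index, label, reject_code = re.findall(
    r' *(\d+) *(\d+) *(\d+) *', line)[0]
time_s = float(sample_index) / 512.0   # 1024 / 512 Hz -> 2.0 s
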
Code example #24
File: alphaomegaio.py Project: CINPLA/python-neo
    def read_block(self,
                   # the first 2 keyword arguments are imposed by the neo.io API
                   lazy = False,
                   cascade = True):
        """
        Return a Block.

        """

        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data block
            of length m_length

            """

            # for information about type 5 data block, see [1]
            count = int((m_length-6)/2-2)
            # -6 corresponds to the header of block 5, and the -2 takes into
            # account the fact that the last 2 values are not available, as
            # the 4 corresponding bytes encode the time stamp of the
            # beginning of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin = os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        fid = open(self.filename, 'rb')

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # step 1: read the headers of all the data blocks to load the file
        # structure

        pos_block = 0 # position of the current block in the file
        file_blocks = [] # list of data blocks available in the file

        if not cascade:
            # we read only the main header

            m_length, m_TypeBlock = struct.unpack('Hcx' , fid.read(4))
            # m_TypeBlock should be 'h', as we read the first block
            block = HeaderReader(fid,
                                 dict_header_type.get(m_TypeBlock,
                                                      Type_Unknown)).read_f()
            block.update({'m_length': m_length,
                          'm_TypeBlock': m_TypeBlock,
                          'pos': pos_block})
            file_blocks.append(block)

        else: # cascade == True

            seg = Segment(file_origin = os.path.basename(self.filename))
            seg.file_origin = os.path.basename(self.filename)
            blck.segments.append(seg)

            while True:
                first_4_bytes = fid.read(4)
                if len(first_4_bytes) < 4:
                    # we have reached the end of the file
                    break
                else:
                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)

                block = HeaderReader(fid,
                                dict_header_type.get(m_TypeBlock,
                                                     Type_Unknown)).read_f()
                block.update({'m_length': m_length,
                              'm_TypeBlock': m_TypeBlock,
                              'pos': pos_block})

                if m_TypeBlock == '2':
                    # The beginning of the block of type '2' is identical for
                    # all types of channels, but the following part depends on
                    # the type of channel. So we need a special case here.

                    # WARNING: How to check the type of channel is not
                    # described in the documentation. So here I use what is
                    # proposed in the C code [2].
                    # According to this C code, it seems that 'm_isAnalog' is
                    # used to distinguish analog and digital channels, and
                    # 'm_Mode' encodes the type of analog channel:
                    # 0 for continuous, 1 for level, 2 for external trigger.
                    # But in some files, I found channels that seemed to be
                    # continuous channels with 'm_Mode' = 128 or 192. So I
                    # decided to consider every channel with 'm_Mode'
                    # different from 1 or 2 as continuous. I also couldn't
                    # check that values of 1 and 2 really mean level and
                    # external trigger, as I had no test files containing
                    # data of these types.

                    type_subblock = 'unknown_channel_type(m_Mode=' \
                                    + str(block['m_Mode'])+ ')'
                    description = Type2_SubBlockUnknownChannels
                    block.update({'m_Name': 'unknown_name'})
                    if block['m_isAnalog'] == 0:
                        # digital channel
                        type_subblock = 'digital'
                        description = Type2_SubBlockDigitalChannels
                    elif block['m_isAnalog'] == 1:
                        # analog channel
                        if block['m_Mode'] == 1:
                            # level channel
                            type_subblock = 'level'
                            description = Type2_SubBlockLevelChannels
                        elif block['m_Mode'] == 2:
                            # external trigger channel
                            type_subblock = 'external_trigger'
                            description = Type2_SubBlockExtTriggerChannels
                        else:
                            # continuous channel
                            type_subblock = 'continuous(Mode' \
                                            + str(block['m_Mode']) +')'
                            description = Type2_SubBlockContinuousChannels

                    subblock = HeaderReader(fid, description).read_f()

                    block.update(subblock)
                    block.update({'type_subblock': type_subblock})

                file_blocks.append(block)
                pos_block += m_length
                fid.seek(pos_block)

            # step 2: find the available channels
            list_chan = [] # list containing indexes of channel blocks
            for ind_block, block in enumerate(file_blocks):
                if block['m_TypeBlock'] == '2':
                    list_chan.append(ind_block)

            # step 3: find blocks containing data for the available channels
            list_data = [] # list of lists of indexes of data blocks
                           # corresponding to each channel
            for ind_chan, chan in enumerate(list_chan):
                list_data.append([])
                num_chan = file_blocks[chan]['m_numChannel']
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '5':
                        if block['m_numChannel'] == num_chan:
                            list_data[ind_chan].append(ind_block)


            # step 4: compute the length (number of samples) of the channels
            chan_len = np.zeros(len(list_data), dtype=int)
            for ind_chan, list_blocks in enumerate(list_data):
                for ind_block in list_blocks:
                    chan_len[ind_chan] += count_samples(
                                          file_blocks[ind_block]['m_length'])

            # step 5: find channels for which data are available
            ind_valid_chan = np.nonzero(chan_len)[0]

            # step 6: load the data
            # TODO give the possibility to load data as AnalogSignalArrays
            for ind_chan in ind_valid_chan:
                list_blocks = list_data[ind_chan]
                ind = 0 # index in the data vector

                # read time stamp for the beginning of the signal
                form = '<l' # reading format
                ind_block = list_blocks[0]
                count = count_samples(file_blocks[ind_block]['m_length'])
                fid.seek(file_blocks[ind_block]['pos']+6+count*2)
                buf = fid.read(struct.calcsize(form))
                val = struct.unpack(form , buf)
                start_index = val[0]

                # WARNING: in the following, blocks are read supposing that
                # they are all contiguous and sorted in time. I don't know if
                # that is always the case. Maybe we should use the time stamp
                # of each data block to choose where to put the read data in
                # the array.
                if not lazy:
                    temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
                    # NOTE: we could directly create an empty AnalogSignal and
                    # load the data in it, but it is much faster to load data
                    # in a temporary numpy array and create the AnalogSignals
                    # from this temporary array
                    for ind_block in list_blocks:
                        count = count_samples(
                                file_blocks[ind_block]['m_length'])
                        fid.seek(file_blocks[ind_block]['pos']+6)
                        temp_array[ind:ind+count] = \
                            np.fromfile(fid, dtype = np.int16, count = count)
                        ind += count

                sampling_rate = \
                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
                t_start = (start_index / sampling_rate).simplified
                if lazy:
                    ana_sig = AnalogSignal([],
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
                    ana_sig.lazy_shape = chan_len[ind_chan]
                else:
                    ana_sig = AnalogSignal(temp_array,
                                           sampling_rate = sampling_rate,
                                           t_start = t_start,
                                           name = file_blocks\
                                               [list_chan[ind_chan]]['m_Name'],
                                           file_origin = \
                                               os.path.basename(self.filename),
                                           units = pq.dimensionless)
# todo apibreak: create ChannelIndex for each signals
#                ana_sig.channel_index = \
#                            file_blocks[list_chan[ind_chan]]['m_numChannel']
                ana_sig.annotate(channel_name = \
                            file_blocks[list_chan[ind_chan]]['m_Name'])
                ana_sig.annotate(channel_type = \
                            file_blocks[list_chan[ind_chan]]['type_subblock'])
                seg.analogsignals.append(ana_sig)

        fid.close()

        if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
            blck.rec_datetime = datetime.datetime(\
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
                # the 10000 is here to convert m_time_hsecond from centisecond
                # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version = version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # I couldn't find a simple copy function for datetime,
                # using replace without arguments is a twisted way to make a
                # copy
                seg.annotate(alphamap_version = version)
        if cascade:
            blck.create_many_to_one_relationship()

        return blck
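
The count_samples helper above is plain arithmetic on the block length: 6
header bytes, then 2-byte samples, with the last 4 bytes (2 "samples") holding
the time stamp. A quick worked check with a made-up block length:

m_length = 1030                          # fabricated type 5 block length
count = int((m_length - 6) / 2 - 2)      # (1030 - 6) / 2 - 2 = 510 samples
assert count == 510
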
Code example #25
def spike_triggered_average(signal, spiketrains, window):
    """
    Calculates the spike-triggered averages of analog signals in a time window
    relative to the spike times of a corresponding spiketrain, for multiple
    signals each. The function receives n analog signals and either one or
    n spiketrains. In case only one spiketrain is given, it is multiplied
    n-fold and used for each of the n analog signals.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' contains n analog signals.
    spiketrains : one SpikeTrain, one numpy ndarray, or a list of n of either of these
        'spiketrains' contains the times of the spikes in the spiketrains.
    window : tuple of 2 Quantity objects with dimensions of time
        'window' is the start time and the stop time, relative to a spike, of
        the time interval for signal averaging.
        If the window size is not a multiple of the sampling interval of the
        signal, the window is extended to the next multiple.

    Returns
    -------
    result_sta : neo AnalogSignal object
        'result_sta' contains the spike-triggered averages of each of the
        analog signals with respect to the spikes in the corresponding
        spiketrains. The length of 'result_sta' is calculated as the number
        of bins from the given start and stop time of the averaging interval
        and the sampling rate of the analog signal. If no spike was given for
        an analog signal, or if all of its spikes had to be ignored because
        the averaging interval was too large, the corresponding returned
        analog signal has all entries set to nan. The number of used and
        unused spikes for each analog signal is returned as annotations on
        the returned AnalogSignal object.

    Examples
    --------

    >>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
    ...                                sampling_rate=10/ms)
    >>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
    ...                                 (-5 * ms, 10 * ms))

    """

    # checking compatibility of data and data types
    # window_starttime: time to specify the start time of the averaging
    # interval relative to a spike
    # window_stoptime: time to specify the stop time of the averaging
    # interval relative to a spike
    window_starttime, window_stoptime = window
    if not (isinstance(window_starttime, pq.quantity.Quantity)
            and window_starttime.dimensionality.simplified == pq.Quantity(
                1, "s").dimensionality):
        raise TypeError("The start time of the window (window[0]) "
                        "must be a time quantity.")
    if not (isinstance(window_stoptime, pq.quantity.Quantity)
            and window_stoptime.dimensionality.simplified == pq.Quantity(
                1, "s").dimensionality):
        raise TypeError("The stop time of the window (window[1]) "
                        "must be a time quantity.")
    if window_stoptime <= window_starttime:
        raise ValueError(
            "The start time of the window (window[0]) must be "
            "earlier than the stop time of the window (window[1]).")

    # checks on signal
    if not isinstance(signal, AnalogSignal):
        raise TypeError("Signal must be an AnalogSignal, not %s." %
                        type(signal))
    if len(signal.shape) > 1:
        # num_signals: number of analog signals
        num_signals = signal.shape[1]
    else:
        raise ValueError("Empty analog signal, hence no averaging possible.")
    if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
        raise ValueError("The chosen time window is larger than the "
                         "time duration of the signal.")

    # spiketrains type check
    if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
        spiketrains = [spiketrains]
    elif isinstance(spiketrains, list):
        for st in spiketrains:
            if not isinstance(st, (np.ndarray, SpikeTrain)):
                raise TypeError(
                    "spiketrains must be a SpikeTrain, a numpy ndarray, or a "
                    "list of one of those, not %s." % type(spiketrains))
    else:
        raise TypeError(
            "spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
            "one of those, not %s." % type(spiketrains))

    # multiplying spiketrain in case only a single spiketrain is given
    if len(spiketrains) == 1 and num_signals != 1:
        template = spiketrains[0]
        spiketrains = []
        for i in range(num_signals):
            spiketrains.append(template)

    # checking for matching numbers of signals and spiketrains
    if num_signals != len(spiketrains):
        raise ValueError(
            "The number of signals and spiketrains has to be the same.")

    # checking the times of signal and spiketrains
    for i in range(num_signals):
        if spiketrains[i].t_start < signal.t_start:
            raise ValueError(
                "The spiketrain indexed by %i starts earlier than "
                "the analog signal." % i)
        if spiketrains[i].t_stop > signal.t_stop:
            raise ValueError("The spiketrain indexed by %i stops later than "
                             "the analog signal." % i)

    # *** Main algorithm: ***

    # window_bins: number of bins of the chosen averaging interval
    window_bins = int(
        np.ceil(((window_stoptime - window_starttime) *
                 signal.sampling_rate).simplified))
    # result_sta: array containing finally the spike-triggered averaged signal
    result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
                              sampling_rate=signal.sampling_rate,
                              units=signal.units)
    # setting of correct times of the spike-triggered average
    # relative to the spike
    result_sta.t_start = window_starttime
    used_spikes = np.zeros(num_signals, dtype=int)
    unused_spikes = np.zeros(num_signals, dtype=int)
    total_used_spikes = 0

    for i in range(num_signals):
        # summing over all respective signal intervals around spiketimes
        for spiketime in spiketrains[i]:
            # checks for sufficient signal data around spiketime
            if (spiketime + window_starttime >= signal.t_start
                    and spiketime + window_stoptime <= signal.t_stop):
                # calculating the startbin in the analog signal of the
                # averaging window for spike
                startbin = int(
                    np.floor(((spiketime + window_starttime - signal.t_start) *
                              signal.sampling_rate).simplified))
                # adds the signal in selected interval relative to the spike
                result_sta[:, i] += signal[startbin:startbin + window_bins, i]
                # counting of the used spikes
                used_spikes[i] += 1
            else:
                # counting of the unused spikes
                unused_spikes[i] += 1

        # normalization
        result_sta[:, i] = result_sta[:, i] / used_spikes[i]

        total_used_spikes += used_spikes[i]

    if total_used_spikes == 0:
        warnings.warn("No spike at all was either found or used for averaging")
    result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)

    return result_sta
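
As a quick worked example of the window arithmetic in both copies of this
function: a (-5 ms, 10 ms) window at a 10 kHz sampling rate gives 150 bins.

import numpy as np
import quantities as pq

window = (-5 * pq.ms, 10 * pq.ms)
sampling_rate = 10 * pq.kHz
window_bins = int(np.ceil(((window[1] - window[0]) *
                           sampling_rate).simplified))
assert window_bins == 150   # 15 ms * 10 kHz
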
Code example #26
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: whether neuroshare segment entities are imported as SpikeTrain objects with associated waveforms, or not imported at all.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)
        
        #elif sys.platform.startswith('darwin'):
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename.encode('utf8')), ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1: #CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(b' ' * 256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    sptr = SpikeTrain([] * pq.s, name=str(entityInfo.szEntityLabel), t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times = pq.Quantity(times, units = 's', copy = False),
                                        t_stop = times.max(),
                                        waveforms = pq.Quantity(waveforms, units = str(pdwSegmentInfo.szUnits), copy = False ),
                                        left_sweep = nsample/2./float(pdwSegmentInfo.dSampleRate)*pq.s,
                                        sampling_rate = float(pdwSegmentInfo.dSampleRate)*pq.Hz,
                                        name = str(entityInfo.szEntityLabel),
                                        )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
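
ns_GetAnalogData may return fewer samples than requested, which is why the
reader above keeps calling it until total_read reaches dwItemCount. The same
pattern with the ctypes call swapped for a hypothetical read_chunk callable:

import numpy as np

def read_all(read_chunk, item_count):
    # read_chunk(start, count) -> np.ndarray stands in for ns_GetAnalogData;
    # it is assumed to always return at least one sample
    data = np.zeros(item_count, dtype='float64')
    total_read = 0
    while total_read < item_count:
        chunk = read_chunk(total_read, item_count - total_read)
        data[total_read:total_read + len(chunk)] = chunk
        total_read += len(chunk)
    return data
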
Code example #27
    def read_segment(self, lazy=False, cascade=True):

        ## Read header file

        f = open(self.filename + '.ent', 'r')
        #version
        version = f.readline()
        if version[:2] != 'V2' and version[:2] != 'V3':
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # strange 2 line for datetime
        #line1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        #line2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except (TypeError, ValueError):
            fulldatetime = None

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            elan_version=version,
            info1=info1,
            info2=info2,
            rec_datetime=fulldatetime,
        )

        if not cascade: return seg

        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling rate sample
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:,c]-min_logic[c])/(max_logic[c]-min_logic[c])*\
                                    (max_physic[c]-min_physic[c])+min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            anaSig = AnalogSignal(sig * unit,
                                  sampling_rate=sampling_rate,
                                  t_start=0. * pq.s,
                                  name=labels[c],
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = data.shape[0]
            anaSig.annotate(channel_name=labels[c])
            seg.analogsignals.append(anaSig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = EventArray(
            times=times,
            labels=labels,
            reject_codes=reject_codes,
        )
        if lazy:
            ea.lazy_shape = len(times)
        seg.eventarrays.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
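
A convention shared by every reader in this listing: with lazy=True the data
arrays stay empty and a lazy_shape attribute records what the shape would have
been. A minimal sketch:

import quantities as pq
from neo import AnalogSignal

sig = AnalogSignal([], units='V', sampling_rate=512 * pq.Hz)  # nothing loaded
sig.lazy_shape = (100000,)  # shape the data would have had if loaded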