Example #1
    def test__children(self):
        signal = self.signals[0]

        segment = Segment(name='seg1')
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        rchan = RecordingChannel(name='rchan1')
        rchan.analogsignals = [signal]
        rchan.create_many_to_one_relationship()

        self.assertEqual(signal._single_parent_objects,
                         ('Segment', 'RecordingChannel'))
        self.assertEqual(signal._multi_parent_objects, ())

        self.assertEqual(signal._single_parent_containers,
                         ('segment', 'recordingchannel'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects,
                         ('Segment', 'RecordingChannel'))
        self.assertEqual(signal._parent_containers,
                         ('segment', 'recordingchannel'))

        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'rchan1')

        assert_neo_object_is_compliant(signal)
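
All of these tests exercise the same Neo pattern: children are assigned to a container's list attribute, and create_many_to_one_relationship() then gives each child a back-reference to its parent. A minimal sketch of that round trip, kept to Segment and AnalogSignal so it holds across the Neo versions shown in these examples:

import numpy as np
import quantities as pq
from neo.core import Segment, AnalogSignal

# Build a data object and a container, then wire up the relationship.
signal = AnalogSignal(np.random.rand(100, 1), units=pq.mV,
                      sampling_rate=1 * pq.kHz)
segment = Segment(name='seg1')
segment.analogsignals = [signal]
segment.create_many_to_one_relationship()

# The child now points back at its parent container.
assert signal.segment is segment
print(signal.segment.name)  # -> 'seg1'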
Example #2
    def test__children(self):
        signal = self.signals[0]

        segment = Segment(name='seg1')
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
        chx.analogsignals = [signal]
        chx.create_many_to_one_relationship()

        self.assertEqual(signal._single_parent_objects,
                         ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._multi_parent_objects, ())

        self.assertEqual(signal._single_parent_containers,
                         ('segment', 'channel_index'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects, ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._parent_containers,
                         ('segment', 'channel_index'))

        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'chx1')

        assert_neo_object_is_compliant(signal)
Example #3
    def test__children(self):
        segment = Segment(name='seg1')
        segment.spikes = [self.spike1]
        segment.create_many_to_one_relationship()

        unit = Unit(name='unit1')
        unit.spikes = [self.spike1]
        unit.create_many_to_one_relationship()

        self.assertEqual(self.spike1._single_parent_objects,
                         ('Segment', 'Unit'))
        self.assertEqual(self.spike1._multi_parent_objects, ())

        self.assertEqual(self.spike1._single_parent_containers,
                         ('segment', 'unit'))
        self.assertEqual(self.spike1._multi_parent_containers, ())

        self.assertEqual(self.spike1._parent_objects,
                         ('Segment', 'Unit'))
        self.assertEqual(self.spike1._parent_containers,
                         ('segment', 'unit'))

        self.assertEqual(len(self.spike1.parents), 2)
        self.assertEqual(self.spike1.parents[0].name, 'seg1')
        self.assertEqual(self.spike1.parents[1].name, 'unit1')

        assert_neo_object_is_compliant(self.spike1)
Example #5
    def test__children(self):
        params = {"test2": "y1", "test3": True}
        evt = Event(
            1.5 * pq.ms,
            label="test epoch",
            name="test",
            description="tester",
            file_origin="test.file",
            test1=1,
            **params
        )
        evt.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(evt)

        segment = Segment(name="seg1")
        segment.events = [evt]
        segment.create_many_to_one_relationship()

        self.assertEqual(evt._single_parent_objects, ("Segment",))
        self.assertEqual(evt._multi_parent_objects, ())

        self.assertEqual(evt._single_parent_containers, ("segment",))
        self.assertEqual(evt._multi_parent_containers, ())

        self.assertEqual(evt._parent_objects, ("Segment",))
        self.assertEqual(evt._parent_containers, ("segment",))

        self.assertEqual(len(evt.parents), 1)
        self.assertEqual(evt.parents[0].name, "seg1")

        assert_neo_object_is_compliant(evt)
Example #6
    def test__children(self):
        signal = self.signals[0]

        segment = Segment(name='seg1')
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
        chx.analogsignals = [signal]
        chx.create_many_to_one_relationship()

        self.assertEqual(signal._single_parent_objects, ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._multi_parent_objects, ())

        self.assertEqual(signal._single_parent_containers, ('segment', 'channel_index'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects, ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._parent_containers, ('segment', 'channel_index'))

        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'chx1')

        assert_neo_object_is_compliant(signal)
Example #7
    def read_segment(self, lazy=False, cascade=True):
        data, metadata = self._read_file_contents()
        annotations = dict(
            (k, metadata.get(k, 'unknown'))
            for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if cascade:
            if metadata['variable'] == 'spikes':
                for i in range(metadata['first_index'],
                               metadata['last_index']):
                    spiketrain = self._extract_spikes(data, metadata, i, lazy)
                    if spiketrain is not None:
                        seg.spiketrains.append(spiketrain)
                # store dt for SpikeTrains only, as can be retrieved from
                # sampling_period for AnalogSignal
                seg.annotate(dt=metadata['dt'])
            else:
                for i in range(metadata['first_index'],
                               metadata['last_index']):
                    # probably slow. Replace with numpy-based version from 0.1
                    signal = self._extract_signal(data, metadata, i, lazy)
                    if signal is not None:
                        seg.analogsignals.append(signal)
            seg.create_many_to_one_relationship()
        return seg
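
This read_segment has the shape of Neo's PyNN-format text readers. A hedged usage sketch (the class name PyNNTextIO and the file name are assumptions; any IO object exposing the method above behaves the same way):

from neo.io import PyNNTextIO  # assumption: an IO class providing the read_segment above

io = PyNNTextIO(filename='spikes.pynn')  # placeholder file name
seg = io.read_segment(lazy=False, cascade=True)
# create_many_to_one_relationship() has already linked children to the segment:
for st in seg.spiketrains:
    assert st.segment is seg
print(seg.annotations.get('dt'))  # dt is annotated for spike data only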
Example #8
    def test__children(self):
        params = {'test2': 'y1', 'test3': True}
        epc = Epoch([1.1, 1.5, 1.7]*pq.ms, durations=[20, 40, 60]*pq.ns,
                    labels=np.array(['test epoch 1',
                                     'test epoch 2',
                                     'test epoch 3'], dtype='S'),
                    name='test', description='tester',
                    file_origin='test.file',
                    test1=1, **params)
        epc.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(epc)

        segment = Segment(name='seg1')
        segment.epochs = [epc]
        segment.create_many_to_one_relationship()

        self.assertEqual(epc._single_parent_objects, ('Segment',))
        self.assertEqual(epc._multi_parent_objects, ())

        self.assertEqual(epc._single_parent_containers, ('segment',))
        self.assertEqual(epc._multi_parent_containers, ())

        self.assertEqual(epc._parent_objects, ('Segment',))
        self.assertEqual(epc._parent_containers, ('segment',))

        self.assertEqual(len(epc.parents), 1)
        self.assertEqual(epc.parents[0].name, 'seg1')

        assert_neo_object_is_compliant(epc)
Example #9
    def test__children(self):
        params = {'test2': 'y1', 'test3': True}
        evt = Event(1.5 * pq.ms,
                    label='test epoch',
                    name='test',
                    description='tester',
                    file_origin='test.file',
                    test1=1,
                    **params)
        evt.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(evt)

        segment = Segment(name='seg1')
        segment.events = [evt]
        segment.create_many_to_one_relationship()

        self.assertEqual(evt._single_parent_objects, ('Segment', ))
        self.assertEqual(evt._multi_parent_objects, ())

        self.assertEqual(evt._single_parent_containers, ('segment', ))
        self.assertEqual(evt._multi_parent_containers, ())

        self.assertEqual(evt._parent_objects, ('Segment', ))
        self.assertEqual(evt._parent_containers, ('segment', ))

        self.assertEqual(len(evt.parents), 1)
        self.assertEqual(evt.parents[0].name, 'seg1')

        assert_neo_object_is_compliant(evt)
Example #10
    def test__children(self):
        params = {'test2': 'y1', 'test3': True}
        epc = Epoch([1.1, 1.5, 1.7] * pq.ms, durations=[20, 40, 60] * pq.ns,
                    labels=np.array(['test epoch 1', 'test epoch 2', 'test epoch 3'], dtype='S'),
                    name='test', description='tester', file_origin='test.file', test1=1, **params)
        epc.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(epc)

        segment = Segment(name='seg1')
        segment.epochs = [epc]
        segment.create_many_to_one_relationship()

        self.assertEqual(epc._single_parent_objects, ('Segment',))
        self.assertEqual(epc._multi_parent_objects, ())

        self.assertEqual(epc._single_parent_containers, ('segment',))
        self.assertEqual(epc._multi_parent_containers, ())

        self.assertEqual(epc._parent_objects, ('Segment',))
        self.assertEqual(epc._parent_containers, ('segment',))

        self.assertEqual(len(epc.parents), 1)
        self.assertEqual(epc.parents[0].name, 'seg1')

        assert_neo_object_is_compliant(epc)
Example #11
    def test__children(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        evta = EventArray([1.1, 1.5, 1.7] * pq.ms,
                          labels=np.array(
                              ['test event 1', 'test event 2', 'test event 3'],
                              dtype='S'),
                          name='test',
                          description='tester',
                          file_origin='test.file',
                          testarg1=1,
                          **params)
        evta.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        assert_neo_object_is_compliant(evta)

        segment = Segment(name='seg1')
        segment.eventarrays = [evta]
        segment.create_many_to_one_relationship()

        self.assertEqual(evta._container_child_objects, ())
        self.assertEqual(evta._data_child_objects, ())
        self.assertEqual(evta._single_parent_objects, ('Segment', ))
        self.assertEqual(evta._multi_child_objects, ())
        self.assertEqual(evta._multi_parent_objects, ())
        self.assertEqual(evta._child_properties, ())

        self.assertEqual(evta._single_child_objects, ())

        self.assertEqual(evta._container_child_containers, ())
        self.assertEqual(evta._data_child_containers, ())
        self.assertEqual(evta._single_child_containers, ())
        self.assertEqual(evta._single_parent_containers, ('segment', ))
        self.assertEqual(evta._multi_child_containers, ())
        self.assertEqual(evta._multi_parent_containers, ())

        self.assertEqual(evta._child_objects, ())
        self.assertEqual(evta._child_containers, ())
        self.assertEqual(evta._parent_objects, ('Segment', ))
        self.assertEqual(evta._parent_containers, ('segment', ))

        self.assertEqual(evta.children, ())
        self.assertEqual(len(evta.parents), 1)
        self.assertEqual(evta.parents[0].name, 'seg1')

        evta.create_many_to_one_relationship()
        evta.create_many_to_many_relationship()
        evta.create_relationship()
        assert_neo_object_is_compliant(evta)
Example #12
    def test__children(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        epc = Epoch(1.5 * pq.ms,
                    duration=20 * pq.ns,
                    label='test epoch',
                    name='test',
                    description='tester',
                    file_origin='test.file',
                    testarg1=1,
                    **params)
        epc.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        assert_neo_object_is_compliant(epc)

        segment = Segment(name='seg1')
        segment.epochs = [epc]
        segment.create_many_to_one_relationship()

        self.assertEqual(epc._container_child_objects, ())
        self.assertEqual(epc._data_child_objects, ())
        self.assertEqual(epc._single_parent_objects, ('Segment', ))
        self.assertEqual(epc._multi_child_objects, ())
        self.assertEqual(epc._multi_parent_objects, ())
        self.assertEqual(epc._child_properties, ())

        self.assertEqual(epc._single_child_objects, ())

        self.assertEqual(epc._container_child_containers, ())
        self.assertEqual(epc._data_child_containers, ())
        self.assertEqual(epc._single_child_containers, ())
        self.assertEqual(epc._single_parent_containers, ('segment', ))
        self.assertEqual(epc._multi_child_containers, ())
        self.assertEqual(epc._multi_parent_containers, ())

        self.assertEqual(epc._child_objects, ())
        self.assertEqual(epc._child_containers, ())
        self.assertEqual(epc._parent_objects, ('Segment', ))
        self.assertEqual(epc._parent_containers, ('segment', ))

        self.assertEqual(epc.children, ())
        self.assertEqual(len(epc.parents), 1)
        self.assertEqual(epc.parents[0].name, 'seg1')

        epc.create_many_to_one_relationship()
        epc.create_many_to_many_relationship()
        epc.create_relationship()
        assert_neo_object_is_compliant(epc)
Example #13
    def read_segment(self, lazy=False, cascade=True):
        data, metadata = self._read_file_contents()
        annotations = dict((k, metadata.get(k, 'unknown'))
                           for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if cascade:
            if metadata['variable'] == 'spikes':
                for i in range(metadata['first_index'], metadata['last_index'] + 1):
                    spiketrain = self._extract_spikes(data, metadata, i, lazy)
                    if spiketrain is not None:
                        seg.spiketrains.append(spiketrain)
                # store dt for SpikeTrains only, as can be retrieved from
                # sampling_period for AnalogSignal
                seg.annotate(dt=metadata['dt'])
            else:
                signal = self._extract_signals(data, metadata, lazy)
                if signal is not None:
                    seg.analogsignals.append(signal)
            seg.create_many_to_one_relationship()
        return seg
Example #14
    def test__children(self):
        signal = self.signals[0]

        segment = Segment(name='seg1')
        segment.analogsignalarrays = [signal]
        segment.create_many_to_one_relationship()

        rcg = RecordingChannelGroup(name='rcg1')
        rcg.analogsignalarrays = [signal]
        rcg.create_many_to_one_relationship()

        self.assertEqual(signal._container_child_objects, ())
        self.assertEqual(signal._data_child_objects, ())
        self.assertEqual(signal._single_parent_objects,
                         ('Segment', 'RecordingChannelGroup'))
        self.assertEqual(signal._multi_child_objects, ())
        self.assertEqual(signal._multi_parent_objects, ())
        self.assertEqual(signal._child_properties, ())

        self.assertEqual(signal._single_child_objects, ())

        self.assertEqual(signal._container_child_containers, ())
        self.assertEqual(signal._data_child_containers, ())
        self.assertEqual(signal._single_child_containers, ())
        self.assertEqual(signal._single_parent_containers,
                         ('segment', 'recordingchannelgroup'))
        self.assertEqual(signal._multi_child_containers, ())
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._child_objects, ())
        self.assertEqual(signal._child_containers, ())
        self.assertEqual(signal._parent_objects,
                         ('Segment', 'RecordingChannelGroup'))
        self.assertEqual(signal._parent_containers,
                         ('segment', 'recordingchannelgroup'))

        self.assertEqual(signal.children, ())
        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'rcg1')

        signal.create_many_to_one_relationship()
        signal.create_many_to_many_relationship()
        signal.create_relationship()
        assert_neo_object_is_compliant(signal)
Example #15
    def test__children(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        evta = EventArray([1.1, 1.5, 1.7]*pq.ms,
                          labels=np.array(['test event 1',
                                           'test event 2',
                                           'test event 3'], dtype='S'),
                          name='test', description='tester',
                          file_origin='test.file',
                          testarg1=1, **params)
        evta.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        assert_neo_object_is_compliant(evta)

        segment = Segment(name='seg1')
        segment.eventarrays = [evta]
        segment.create_many_to_one_relationship()

        self.assertEqual(evta._container_child_objects, ())
        self.assertEqual(evta._data_child_objects, ())
        self.assertEqual(evta._single_parent_objects, ('Segment',))
        self.assertEqual(evta._multi_child_objects, ())
        self.assertEqual(evta._multi_parent_objects, ())
        self.assertEqual(evta._child_properties, ())

        self.assertEqual(evta._single_child_objects, ())

        self.assertEqual(evta._container_child_containers, ())
        self.assertEqual(evta._data_child_containers, ())
        self.assertEqual(evta._single_child_containers, ())
        self.assertEqual(evta._single_parent_containers, ('segment',))
        self.assertEqual(evta._multi_child_containers, ())
        self.assertEqual(evta._multi_parent_containers, ())

        self.assertEqual(evta._child_objects, ())
        self.assertEqual(evta._child_containers, ())
        self.assertEqual(evta._parent_objects, ('Segment',))
        self.assertEqual(evta._parent_containers, ('segment',))

        self.assertEqual(evta.children, ())
        self.assertEqual(len(evta.parents), 1)
        self.assertEqual(evta.parents[0].name, 'seg1')

        evta.create_many_to_one_relationship()
        evta.create_many_to_many_relationship()
        evta.create_relationship()
        assert_neo_object_is_compliant(evta)
Example #16
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        t_start=0. * pq.s,
        unit=pq.s,
    ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i, line in enumerate(f):
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '':
                alldata = alldata[:-1]
            if alldata[0] == '':
                alldata = alldata[1:]
            if lazy:
                spike_times = []
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit

            sptr = SpikeTrain(spike_times * unit,
                              t_start=t_start,
                              t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index=i)
            seg.spiketrains.append(sptr)
        f.close()

        seg.create_many_to_one_relationship()
        return seg
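
A hedged usage sketch for the reader above: it matches the shape of Neo's AsciiSpikeTrainIO, which stores one spike train per line, but treat the class name and keyword support as assumptions tied to the Neo version in use:

import quantities as pq
from neo.io import AsciiSpikeTrainIO  # assumption: the IO defining the read_segment above

# One spike train per row, tab-delimited spike times in seconds.
with open('spikes.txt', 'w') as f:
    f.write('0.1\t0.5\t0.9\n')
    f.write('0.2\t0.6\n')

io = AsciiSpikeTrainIO(filename='spikes.txt')
seg = io.read_segment(delimiter='\t', t_start=0. * pq.s, unit=pq.s)
print(len(seg.spiketrains))  # -> 2
print(seg.spiketrains[0].annotations['channel_index'])  # -> 0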
Example #17
    def test__children(self):
        params = {'testarg2': 'yes', 'testarg3': True}
        epc = Epoch(1.5*pq.ms, duration=20*pq.ns,
                    label='test epoch', name='test', description='tester',
                    file_origin='test.file',
                    testarg1=1, **params)
        epc.annotate(testarg1=1.1, testarg0=[1, 2, 3])
        assert_neo_object_is_compliant(epc)

        segment = Segment(name='seg1')
        segment.epochs = [epc]
        segment.create_many_to_one_relationship()

        self.assertEqual(epc._container_child_objects, ())
        self.assertEqual(epc._data_child_objects, ())
        self.assertEqual(epc._single_parent_objects, ('Segment',))
        self.assertEqual(epc._multi_child_objects, ())
        self.assertEqual(epc._multi_parent_objects, ())
        self.assertEqual(epc._child_properties, ())

        self.assertEqual(epc._single_child_objects, ())

        self.assertEqual(epc._container_child_containers, ())
        self.assertEqual(epc._data_child_containers, ())
        self.assertEqual(epc._single_child_containers, ())
        self.assertEqual(epc._single_parent_containers, ('segment',))
        self.assertEqual(epc._multi_child_containers, ())
        self.assertEqual(epc._multi_parent_containers, ())

        self.assertEqual(epc._child_objects, ())
        self.assertEqual(epc._child_containers, ())
        self.assertEqual(epc._parent_objects, ('Segment',))
        self.assertEqual(epc._parent_containers, ('segment',))

        self.assertEqual(epc.children, ())
        self.assertEqual(len(epc.parents), 1)
        self.assertEqual(epc.parents[0].name, 'seg1')

        epc.create_many_to_one_relationship()
        epc.create_many_to_many_relationship()
        epc.create_relationship()
        assert_neo_object_is_compliant(epc)
Example #18
    def read_segment(self,
                     lazy=False,
                     cascade=True,
                     delimiter='\t',
                     t_start=0. * pq.s,
                     unit=pq.s,
                     ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i, line in enumerate(f):
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '':
                alldata = alldata[:-1]
            if alldata[0] == '':
                alldata = alldata[1:]
            if lazy:
                spike_times = []
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit

            sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index=i)
            seg.spiketrains.append(sptr)
        f.close()

        seg.create_many_to_one_relationship()
        return seg
Example #19
    def test__children(self):
        signal = self.signals[0]

        segment = Segment(name="seg1")
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        rchan = RecordingChannel(name="rchan1")
        rchan.analogsignals = [signal]
        rchan.create_many_to_one_relationship()

        self.assertEqual(signal._container_child_objects, ())
        self.assertEqual(signal._data_child_objects, ())
        self.assertEqual(signal._single_parent_objects, ("Segment", "RecordingChannel"))
        self.assertEqual(signal._multi_child_objects, ())
        self.assertEqual(signal._multi_parent_objects, ())
        self.assertEqual(signal._child_properties, ())

        self.assertEqual(signal._single_child_objects, ())

        self.assertEqual(signal._container_child_containers, ())
        self.assertEqual(signal._data_child_containers, ())
        self.assertEqual(signal._single_child_containers, ())
        self.assertEqual(signal._single_parent_containers, ("segment", "recordingchannel"))
        self.assertEqual(signal._multi_child_containers, ())
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._child_objects, ())
        self.assertEqual(signal._child_containers, ())
        self.assertEqual(signal._parent_objects, ("Segment", "RecordingChannel"))
        self.assertEqual(signal._parent_containers, ("segment", "recordingchannel"))

        self.assertEqual(signal.children, ())
        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, "seg1")
        self.assertEqual(signal.parents[1].name, "rchan1")

        signal.create_many_to_one_relationship()
        signal.create_many_to_many_relationship()
        signal.create_relationship()
        assert_neo_object_is_compliant(signal)
Example #20
    def read_segment(self, lazy=False):
        assert not lazy, 'Do not support lazy'

        data, metadata = self._read_file_contents()
        annotations = dict(
            (k, metadata.get(k, 'unknown'))
            for k in ("label", "variable", "first_id", "last_id"))
        seg = Segment(**annotations)
        if metadata['variable'] == 'spikes':
            for i in range(metadata['first_index'],
                           metadata['last_index'] + 1):
                spiketrain = self._extract_spikes(data, metadata, i)
                if spiketrain is not None:
                    seg.spiketrains.append(spiketrain)
            # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal
            seg.annotate(dt=metadata['dt'])
        else:
            signal = self._extract_signals(data, metadata)
            if signal is not None:
                seg.analogsignals.append(signal)
        seg.create_many_to_one_relationship()
        return seg
Example #21
    def test__children(self):
        segment = Segment(name="seg1")
        segment.spikes = [self.spike1]
        segment.create_many_to_one_relationship()

        unit = Unit(name="unit1")
        unit.spikes = [self.spike1]
        unit.create_many_to_one_relationship()

        self.assertEqual(self.spike1._container_child_objects, ())
        self.assertEqual(self.spike1._data_child_objects, ())
        self.assertEqual(self.spike1._single_parent_objects, ("Segment", "Unit"))
        self.assertEqual(self.spike1._multi_child_objects, ())
        self.assertEqual(self.spike1._multi_parent_objects, ())
        self.assertEqual(self.spike1._child_properties, ())

        self.assertEqual(self.spike1._single_child_objects, ())

        self.assertEqual(self.spike1._container_child_containers, ())
        self.assertEqual(self.spike1._data_child_containers, ())
        self.assertEqual(self.spike1._single_child_containers, ())
        self.assertEqual(self.spike1._single_parent_containers, ("segment", "unit"))
        self.assertEqual(self.spike1._multi_child_containers, ())
        self.assertEqual(self.spike1._multi_parent_containers, ())

        self.assertEqual(self.spike1._child_objects, ())
        self.assertEqual(self.spike1._child_containers, ())
        self.assertEqual(self.spike1._parent_objects, ("Segment", "Unit"))
        self.assertEqual(self.spike1._parent_containers, ("segment", "unit"))

        self.assertEqual(self.spike1.children, ())
        self.assertEqual(len(self.spike1.parents), 2)
        self.assertEqual(self.spike1.parents[0].name, "seg1")
        self.assertEqual(self.spike1.parents[1].name, "unit1")

        self.spike1.create_many_to_one_relationship()
        self.spike1.create_many_to_many_relationship()
        self.spike1.create_relationship()
        assert_neo_object_is_compliant(self.spike1)
Example #22
    def read_segment(
        self,
        lazy=False,
        delimiter='\t',
        t_start=0. * pq.s,
        unit=pq.s,
    ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        assert not lazy, 'Do not support lazy'

        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))

        with open(self.filename, 'r', newline=None) as f:
            for i, line in enumerate(f):
                alldata = line[:-1].split(delimiter)
                if alldata[-1] == '':
                    alldata = alldata[:-1]
                if alldata[0] == '':
                    alldata = alldata[1:]

                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit

                sptr = SpikeTrain(spike_times * unit,
                                  t_start=t_start,
                                  t_stop=t_stop)

                sptr.annotate(channel_index=i)
                seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
Example #23
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/
                    #~ globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop, name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                fragment_starts = fragment_starts.astype('f8') / global_header[
                    'freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=entity_header['offset'])
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers: TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename, np.dtype(
                            'S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
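
This reader resembles Neo's NeuroExplorerIO, walking the global header and then one entity header per variable in a .nex file. A hedged usage sketch (the file name is a placeholder, and the lazy/cascade keywords assume an older Neo API):

from neo.io import NeuroExplorerIO

io = NeuroExplorerIO(filename='recording.nex')  # placeholder file name
seg = io.read_segment(lazy=False, cascade=True)
print(seg.annotations['neuroexplorer_version'])
for st in seg.spiketrains:
    print(st.name, st.annotations.get('channel_index'))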
Example #24
    def read_segment(self, lazy=False, cascade=True):
        seg = Segment(file_origin=os.path.basename(self.filename))

        if not cascade:
            return seg

        fid = open(self.filename, 'rb')

        headertext = fid.read(2048)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line:
                continue
            key, val = line.split('=')
            if key in ['NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ']:
                val = int(val)
            elif key in ['AD', 'DT']:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        if not lazy:
            data = np.memmap(self.filename, np.dtype('i2'), 'r',
                             shape=(header['NP'] // header['NC'], header['NC']),
                             offset=header['NBH'])

        for c in range(header['NC']):

            YCF = float(header['YCF%d' % c].replace(',', '.'))
            YAG = float(header['YAG%d' % c].replace(',', '.'))
            YZ = float(header['YZ%d' % c].replace(',', '.'))

            ADCMAX = header['ADCMAX']
            AD = header['AD']
            DT = header['DT']

            if 'TU' in header:
                if header['TU'] == 'ms':
                    DT *= .001

            unit = header['YU%d' % c]
            try:
                unit = pq.Quantity(1., unit)
            except Exception:
                unit = pq.Quantity(1., '')

            if lazy:
                signal = [] * unit
            else:
                signal = (data[:, header['YO%d' % c]].astype('f4') - YZ) \
                    * AD / (YCF * YAG * (ADCMAX + 1)) * unit

            ana = AnalogSignal(signal,
                               sampling_rate=pq.Hz / DT,
                               t_start=0. * pq.s,
                               name=header['YN%d' % c],
                               channel_index=c)
            if lazy:
                ana.lazy_shape = header['NP'] // header['NC']

            seg.analogsignals.append(ana)

        seg.create_many_to_one_relationship()
        return seg
Example #25
def generate_one_simple_segment(seg_name='segment 0', supported_objects=[], nb_analogsignal=4,
                                t_start=0. * pq.s, sampling_rate=10 * pq.kHz, duration=6. * pq.s,
                                nb_spiketrain=6, spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
                                event_types={'stim': ['a', 'b', 'c', 'd'],
                                             'enter_zone': ['one', 'two'],
                                             'color': ['black', 'yellow', 'green'], },
                                event_size_range=[5, 20],
                                epoch_types={'animal state': ['Sleep', 'Freeze', 'Escape'],
                                             'light': ['dark', 'lighted']},
                                epoch_duration_range=[.5, 3.],
                                # this should be multiplied by pq.s, no?
                                array_annotations={'valid': np.array([True, False]),
                                                   'number': np.array(range(5))}):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start, units=pq.mV, channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            # spikedata = rand(int((spikerate*duration).simplified))*duration
            # sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration, t_start=t_start, t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(spikes)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            sptr.array_annotate(**arr_ann)
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in event_types.items():
            evt_size = rand() * np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size) * len(labels)).astype('i')]
            evt = Event(times=rand(evt_size) * duration, labels=labels)
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(evt_size) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            evt.array_annotate(**arr_ann)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in epoch_types.items():
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand() * (epoch_duration_range[1] - epoch_duration_range[0])
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            assert len(times) == len(durations)
            assert len(times) == len(labels)
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity(durations, units=pq.s),
                        labels=labels,)
            assert epc.times.dtype == 'float'
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(times)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            epc.array_annotate(**arr_ann)
            seg.epochs.append(epc)

    # TODO : Spike, Event

    seg.create_many_to_one_relationship()
    return seg
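
A hedged usage sketch for the generator above (in some Neo versions this helper lives in neo.test.generate_datasets; here it is assumed to be importable or defined as shown, and it targets a Neo version whose AnalogSignal still accepts channel_index):

from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch

seg = generate_one_simple_segment(
    seg_name='demo',
    supported_objects=[Segment, AnalogSignal, SpikeTrain, Event, Epoch],
    nb_analogsignal=2, nb_spiketrain=3)
print(len(seg.analogsignals), len(seg.spiketrains))  # -> 2 3
assert seg.spiketrains[0].segment is seg  # relationships already created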
Example #26
def generate_one_simple_segment(
    seg_name='segment 0',
    supported_objects=[],
    nb_analogsignal=4,
    t_start=0. * pq.s,
    sampling_rate=10 * pq.kHz,
    duration=6. * pq.s,
    nb_spiketrain=6,
    spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
    event_array_types={
        'stim': ['a', 'b', 'c', 'd'],
        'enter_zone': ['one', 'two'],
        'color': ['black', 'yellow', 'green'],
    },
    event_array_size_range=[5, 20],
    epoch_array_types={
        'animal state': ['Sleep', 'Freeze', 'Escape'],
        'light': ['dark', 'lighted']
    },
    epoch_array_duration_range=[.5, 3.],
):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  units=pq.mV,
                                  channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration,
                              t_start=t_start,
                              t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if EventArray in supported_objects:
        for name, labels in iteritems(event_array_types):
            ea_size = rand() * np.diff(event_array_size_range)
            ea_size += event_array_size_range[0]
            ea_size = int(ea_size)  # np.diff returns an array; rand() needs an int size
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(ea_size) * len(labels)).astype('i')]
            ea = EventArray(times=rand(ea_size) * duration, labels=labels)
            seg.eventarrays.append(ea)

    if EpochArray in supported_objects:
        for name, labels in iteritems(epoch_array_types):
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand() * np.diff(epoch_array_duration_range)
                dur += epoch_array_duration_range[0]
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            epa = EpochArray(
                times=pq.Quantity(times, units=pq.s),
                durations=pq.Quantity([x[0] for x in durations], units=pq.s),
                labels=labels,
            )
            seg.epocharrays.append(epa)

    # TODO : Spike, Event, Epoch

    seg.create_many_to_one_relationship()
    return seg
Example #27
    def read_segment(
        self,
        cascade=True,
        lazy=False,
    ):
        """
        Arguments:
        """
        f = struct_file(self.filename, 'rb')

        # Name
        f.seek(64, 0)
        surname = f.read(22)
        while surname and surname[-1] == ' ':
            surname = surname[:-1]
        firstname = f.read(20)
        while firstname and firstname[-1] == ' ':
            firstname = firstname[:-1]

        # Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')

        # header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(
            name=firstname + ' ' + surname,
            file_origin=os.path.basename(self.filename),
        )
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            return seg

        # area
        f.seek(176, 0)
        zone_names = [
            'ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E',
            'MONTAGE', 'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
            'EVENT B', 'TRIGGER'
        ]
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((rawdata.size // Num_Chan, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)

        units = {
            -1: pq.nano * pq.V,
            0: pq.uV,
            1: pq.mV,
            2: 1,
            100: pq.percent,
            101: pq.dimensionless,
            102: pq.dimensionless
        }

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
                'iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max -
                               physical_min) / float(logical_max -
                                                     logical_min + 1)
                signal = (rawdata[:, c].astype('f') -
                          logical_ground) * factor * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  name=label,
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground=ground)

            seg.analogsignals.append(anaSig)

        sampling_rate = np.mean(
            [anaSig.sampling_rate for anaSig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.fromstring(
                f.read(length),
                dtype=[('pos', 'u4'), ('label', label_dtype)],
            )
            ea = EventArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (triggers['pos'] !=
                                                           0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos'] / sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.fromstring(f.read(length),
                                   dtype=[
                                       ('label', 'u4'),
                                       ('start', 'u4'),
                                       ('stop', 'u4'),
                                   ])
            ep = EpochArray(name=zname[0] + zname[1:].lower())
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (epochs['stop'] <
                                                           rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start'] / sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start']) /
                                sampling_rate).rescale('s')
            else:
                ep.lazy_shape = triggers.size
            seg.epocharrays.append(ep)

        seg.create_many_to_one_relationship()
        return seg
Example #28
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # DSP channel headers = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2 : a first loop to count sizes
        # signal
        # integer dtypes: these are used below as array sizes and slice indices
        nb_samples = np.zeros(len(slowChannelHeaders), dtype="i8")
        sample_positions = np.zeros(len(slowChannelHeaders), dtype="i8")
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if nb_samples[chan] == 0:
                    # first block for this channel: keep its start time,
                    # converted from clock ticks to seconds
                    t_starts[chan] = time / globalHeader["ADFrequency"]
                if n2 > 0:
                    nb_samples[chan] += n2

        ## Step 3: allocate memory and a second loop for reading (if not lazy)
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
            pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype="f")
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader["Channel"]
                n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
                time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
                time /= globalHeader["ADFrequency"]

                if n2 < 0:
                    break
                if dataBlockHeader["Type"] == 1:
                    # spike
                    unit = dataBlockHeader["Unit"]
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = (
                            np.frombuffer(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                        )
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader["Type"] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan] += 1

                elif dataBlockHeader["Type"] == 5:
                    # signal
                    data = np.frombuffer(fid.read(n2 * 2), dtype="i2").astype("f4")
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size

        ## Step 4: create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
            else:
                times = evarrays[chan]
            ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] == 102:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
                elif globalHeader["Version"] >= 103:
                    gain = globalHeader["SlowMaxMagnitudeMV"] / (
                        0.5
                        * (2 ** globalHeader["BitsPerSpikeSample"])
                        * slowChannelHeaders[chan]["Gain"]
                        * slowChannelHeaders[chan]["PreampGain"]
                    )
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]["Channel"],
                channel_name=slowChannelHeaders[chan]["Name"],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader["Version"] < 103:
                        gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                    elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                        )
                    elif globalHeader["Version"] >= 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                        )
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
            sptr.annotate(channel_index=chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
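
This reader uses a two-pass pattern: a first pass over the data blocks only counts spikes, events and samples, then arrays are preallocated and a second pass fills them. A toy sketch of the same idea on a list of invented (channel, value) records:

import numpy as np

records = [(0, 1.0), (1, 2.0), (0, 3.0), (0, 4.0)]  # fake (channel, value) blocks

# pass 1: count entries per channel
counts = {}
for chan, _ in records:
    counts[chan] = counts.get(chan, 0) + 1

# allocate once, then pass 2: fill the preallocated arrays in order
arrays = {chan: np.empty(n) for chan, n in counts.items()}
positions = {chan: 0 for chan in counts}
for chan, value in records:
    arrays[chan][positions[chan]] = value
    positions[chan] += 1

print(arrays[0])  # [1. 3. 4.]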
Example No. 29
    def read_segment(self, lazy=False):
        """

        """
        if lazy:
            raise NotImplementedError("lazy mode not supported")

        seg = Segment(file_origin=os.path.basename(self.filename))

        # genfromtxt
        if self.method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=self.delimiter,
                                usecols=self.usecols,
                                skip_header=self.skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif self.method == 'csv':
            with open(self.filename, 'rU') as fp:
                tab = [l for l in csv.reader(fp, delimiter=self.delimiter)]
            tab = tab[self.skiprows:]
            sig = np.array(tab, dtype='f')
            if self.usecols is not None:
                mask = np.array(self.usecols)
                sig = sig[:, mask]
        elif self.method == 'homemade':
            fid = open(self.filename, 'rU')
            for l in range(self.skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                parts = line.split(self.delimiter)
                while '' in parts:
                    parts.remove('')
                tab.append(parts)
            sig = np.array(tab, dtype='f')
            if self.usecols is not None:
                mask = np.array(self.usecols)
                sig = sig[:, mask]
        else:
            sig = self.method(self.filename, self.usecols)
            if not isinstance(sig, np.ndarray):
                raise TypeError("method function must return a NumPy array")
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
            elif len(sig.shape) != 2:
                raise ValueError(
                    "method function must return a 1D or 2D NumPy array")

        if self.timecolumn is None:
            sampling_rate = self.sampling_rate
            t_start = self.t_start
        else:
            delta_t = np.diff(sig[:, self.timecolumn])
            mean_delta_t = np.mean(delta_t)
            if (delta_t.max() - delta_t.min()) / mean_delta_t < 1e-6:
                # equally spaced --> AnalogSignal
                sampling_rate = 1.0 / mean_delta_t / self.time_units
            else:
                # not equally spaced --> IrregularlySampledSignal
                sampling_rate = None
            t_start = sig[0, self.timecolumn] * self.time_units

        if self.signal_group_mode == 'all-in-one':
            if self.timecolumn is not None:
                mask = list(range(sig.shape[1]))
                if self.timecolumn >= 0:
                    mask.remove(self.timecolumn)
                else:  # allow negative column index
                    mask.remove(sig.shape[1] + self.timecolumn)
                signal = sig[:, mask]
            else:
                signal = sig
            if sampling_rate is None:
                # time values must come from the full array `sig`;
                # `signal` has the time column removed
                irr_sig = IrregularlySampledSignal(sig[:, self.timecolumn] *
                                                   self.time_units,
                                                   signal * self.units,
                                                   name='multichannel')
                seg.irregularlysampledsignals.append(irr_sig)
            else:
                ana_sig = AnalogSignal(signal * self.units,
                                       sampling_rate=sampling_rate,
                                       t_start=t_start,
                                       channel_index=self.usecols
                                       or np.arange(signal.shape[1]),
                                       name='multichannel')
                seg.analogsignals.append(ana_sig)
        else:
            if self.timecolumn is not None and self.timecolumn < 0:
                time_col = sig.shape[1] + self.timecolumn
            else:
                time_col = self.timecolumn
            for i in range(sig.shape[1]):
                if time_col == i:
                    continue
                signal = sig[:, i] * self.units
                if sampling_rate is None:
                    irr_sig = IrregularlySampledSignal(sig[:, time_col] *
                                                       self.time_units,
                                                       signal,
                                                       t_start=t_start,
                                                       channel_index=i,
                                                       name='Column %d' % i)
                    seg.irregularlysampledsignals.append(irr_sig)
                else:
                    ana_sig = AnalogSignal(signal,
                                           sampling_rate=sampling_rate,
                                           t_start=t_start,
                                           channel_index=i,
                                           name='Column %d' % i)
                    seg.analogsignals.append(ana_sig)

        seg.create_many_to_one_relationship()
        return seg
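
A usage sketch for this kind of column-oriented text reader. The class name AsciiSignalIO and its constructor arguments are assumptions modelled on the method above; data.txt is generated on the fly so the sketch can run:

import numpy as np
import quantities as pq
from neo.io import AsciiSignalIO  # import path assumed

# write a small fake two-column file
np.savetxt('data.txt', np.random.randn(100, 2), delimiter='\t')

io = AsciiSignalIO('data.txt', delimiter='\t',
                   sampling_rate=1000. * pq.Hz)  # arguments assumed
seg = io.read_segment()
print(len(seg.analogsignals), seg.analogsignals[0].sampling_rate)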
Example No. 30
    def read_segment(self,
                     # the 2 first keyword arguments are imposed by neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     segment_duration = 15.,
                     num_analogsignal = 4,
                     num_spiketrain_by_channel = 3,
                    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        By default this IO reads a Segment.

        This is just an example to be adapted to each ClassIO.
        These 3 parameters are taken into account because this function
        returns a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration : the size in seconds of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain_by_channel : number of SpikeTrain per channel

        """

        sampling_rate = 10000. #Hz
        t_start = -1.


        #time vector for generated signal
        timevect = np.arange(t_start, t_start+ segment_duration , 1./sampling_rate)

        # create an empty segment
        seg = Segment( name = 'it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                            channel_index = i ,segment_duration = segment_duration, t_start = t_start)
                seg.analogsignals += [ ana ]

            # read nested spiketrain
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(lazy = lazy , cascade = cascade ,
                                                            segment_duration = segment_duration, t_start = t_start , channel_index = i)
                    seg.spiketrains += [ sptr ]


            # create an Event that mimics triggers.
            # note that ExampleIO does not allow direct access to Events;
            # for that you need read_segment(cascade = True)

            if lazy:
                # in the lazy case no data are read
                # eva is empty
                eva = Event()
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva = Event(timevect[(np.random.rand(n)*timevect.size).astype('i')]* pq.s)
                # all durations are the same; note that Event has no durations
                # attribute in neo, so this is ad hoc (an Epoch may be intended)
                eva.durations = np.ones(n)*500*pq.ms
                # label
                l = [ ]
                for i in range(n):
                    if np.random.rand()>.6: l.append( 'TriggerA' )
                    else : l.append( 'TriggerB' )
                eva.labels = np.array( l )

            seg.events += [ eva ]

        seg.create_many_to_one_relationship()
        return seg
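
A usage sketch for this fake IO; ExampleIO ships with neo as a template, though the exact import path and constructor are assumptions here:

from neo.io import ExampleIO  # import path assumed

io = ExampleIO(filename='nothing.fake')  # the filename does not matter
seg = io.read_segment(segment_duration=3.,
                      num_analogsignal=2,
                      num_spiketrain_by_channel=1)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))
# expected: 2 analog signals, 2 spike trains (1 per channel), 1 event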
Example No. 31
    def read_segment(self,
                     # the 2 first keyword arguments are imposed by neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     t_start = 0.,
                     segment_duration = 0.,
                    ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.

        Parameters:
            segment_duration : the size in seconds of the segment.
            t_start : start time of the segment, in seconds

        """
        #if no segment duration is given, use the complete file
        if segment_duration == 0. :
            segment_duration=float(self.metadata["TimeSpan"])
        #if the segment duration is bigger than file, use the complete file
        if segment_duration >=float(self.metadata["TimeSpan"]):
            segment_duration=float(self.metadata["TimeSpan"])
        #if the time sum of start point and segment duration is bigger than
        #the file time span, cap it at the end
        if segment_duration+t_start>float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])-t_start
        
        # create an empty segment
        seg = Segment( name = "segment from the NeuroshareapiIO")

        if cascade:
            # read nested analogsignals
            
            if self.metadata["num_analogs"] == 0:
                print ("no analog signals in this file!")
            else:
                #run through the number of analog channels found at the __init__ function
                for i in range(self.metadata["num_analogs"]):
                    #create an analog signal object for each channel found
                    ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                             channel_index = self.metadata["elecChanId"][i],
                                            segment_duration = segment_duration, t_start=t_start)
                    #add analog signal read to segment object
                    seg.analogsignals += [ ana ]
            
            # read triggers (in this case without any duration)
            for i in range(self.metadata["num_trigs"]):
                #create event object for each trigger/bit found
                eva = self.read_eventarray(lazy = lazy , 
                                           cascade = cascade,
                                           channel_index = self.metadata["triggersId"][i],
                                           segment_duration = segment_duration,
                                           t_start = t_start,)
                #add event object to segment
                seg.eventarrays +=  [eva]
            #read epochs (digital events with duration)
            for i in range(self.metadata["num_digiEpochs"]):
                #create epoch object for each digital event found
                epa = self.read_epocharray(lazy = lazy, 
                                           cascade = cascade,
                                           channel_index = self.metadata["digiEpochId"][i],
                                            segment_duration = segment_duration,
                                            t_start = t_start,)
                #add event object to segment
                seg.epocharrays +=  [epa]
            # read nested spiketrain
            #run through all spike channels found
            for i in range(self.metadata["num_spkChans"]):
                #create spike object
                sptr = self.read_spiketrain(lazy = lazy, cascade = cascade,
                        channel_index = self.metadata["spkChanId"][i],
                        segment_duration = segment_duration,
                        t_start = t_start)
                #add the spike object to segment
                seg.spiketrains += [sptr]

        seg.create_many_to_one_relationship()
        
        return seg
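
The three duration checks at the top of this method reduce to a small clamping rule. A standalone sketch of that rule (the time-span value is invented):

def clamp_duration(t_start, segment_duration, time_span):
    # 0 means "whole file"; never run past the end of the file
    if segment_duration == 0. or segment_duration >= time_span:
        segment_duration = time_span
    if segment_duration + t_start > time_span:
        segment_duration = time_span - t_start
    return segment_duration

print(clamp_duration(t_start=5., segment_duration=0., time_span=60.))  # 55.0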
Example No. 32
    def read_segment(self, lazy = False, cascade = True):

        ## Read header file (vhdr)
        header = readBrainSoup(self.filename)

        assert header['Common Infos']['DataFormat'] == 'BINARY', NotImplementedError
        assert header['Common Infos']['DataOrientation'] == 'MULTIPLEXED', NotImplementedError
        nb_channel = int(header['Common Infos']['NumberOfChannels'])
        sampling_rate = 1.e6/float(header['Common Infos']['SamplingInterval']) * pq.Hz

        fmt = header['Binary Infos']['BinaryFormat']
        fmts = { 'INT_16':np.int16, 'IEEE_FLOAT_32':np.float32,}
        assert fmt in fmts, NotImplementedError
        dt = fmts[fmt]

        seg = Segment(file_origin = os.path.basename(self.filename), )
        if not cascade : return seg

        # read binary
        if not lazy:
            binary_file = os.path.splitext(self.filename)[0]+'.eeg'
            sigs = np.memmap(binary_file, dt, 'r').astype('f')

            n = int(sigs.size/nb_channel)
            sigs = sigs[:n*nb_channel]
            sigs = sigs.reshape(n, nb_channel)

        for c in range(nb_channel):
            name, ref, res, units = header['Channel Infos']['Ch%d' % (c+1,)].split(',')
            units = pq.Quantity(1, units.replace('µ', 'u') )
            if lazy:
                signal = [ ]*units
            else:
                signal = sigs[:,c]*units
            anasig = AnalogSignal(signal = signal,
                                                channel_index = c,
                                                name = name,
                                                sampling_rate = sampling_rate,
                                                )
            if lazy:
                anasig.lazy_shape = -1
            seg.analogsignals.append(anasig)

        # read marker
        marker_file = os.path.splitext(self.filename)[0]+'.vmrk'
        all_info = readBrainSoup(marker_file)['Marker Infos']
        all_types = [ ]
        times = [ ]
        labels = [ ]
        for i in range(len(all_info)):
            type_, label, pos, size, channel = all_info['Mk%d' % (i+1,)].split(',')[:5]
            all_types.append(type_)
            times.append(float(pos)/sampling_rate.magnitude)
            labels.append(label)
        all_types = np.array(all_types)
        times = np.array(times) * pq.s
        labels = np.array(labels, dtype = 'S')
        for type_ in np.unique(all_types):
            ind = type_ == all_types
            if lazy:
                ea = EventArray(name = str(type_))
                ea.lazy_shape = -1
            else:
                ea = EventArray( times = times[ind],
                                    labels  = labels[ind],
                                    name = str(type_),
                                    )
            seg.eventarrays.append(ea)


        seg.create_many_to_one_relationship()
        return seg
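
'MULTIPLEXED' means samples are interleaved channel by channel, which is why the memmap is reshaped to (n, nb_channel). A tiny self-contained sketch of that decode with fake data:

import numpy as np

nb_channel = 3
interleaved = np.arange(12, dtype=np.int16)  # c0, c1, c2, c0, c1, c2, ...

n = interleaved.size // nb_channel
sigs = interleaved[:n * nb_channel].reshape(n, nb_channel).astype('f')
print(sigs[:, 0])  # samples of channel 0: [0. 3. 6. 9.]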
Example No. 33
    def read_segment(self, block_index=0, seg_index=0, lazy=False,
                     signal_group_mode=None, load_waveforms=False, time_slice=None,
                     strict_slicing=True):
        """
        :param block_index: int, default 0. If there are several blocks, block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls how channels are grouped into AnalogSignal objects.
            * 'split-all': each channel gives its own AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped into
            a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is loaded or left as None.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All AnalogSignal, SpikeTrain, Event and Epoch objects are loaded only within the slice.

        :param strict_slicing: True by default.
             Controls whether an error is raised when one of the time_slice members (t_start or
             t_stop) is outside the real time range of the segment.
        """

        if lazy:
            assert time_slice is None, 'For lazy=True, time_slice must be None; pass it to load() instead'

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        # AnalogSignal
        signal_channels = self.header['signal_channels']
        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                        channel_indexes,
                        signal_group_mode=signal_group_mode).items():
                    # make a proxy...
                    anasig = AnalogSignalProxy(rawio=self, global_channel_indexes=ind_abs,
                                    block_index=block_index, seg_index=seg_index)

                    if not lazy:
                        # ... and get the real AnalogSignal if not lazy
                        anasig = anasig.load(time_slice=time_slice, strict_slicing=strict_slicing)
                        # TODO magnitude_mode='rescaled'/'raw'

                    anasig.segment = seg
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            # make a proxy...
            sptr = SpikeTrainProxy(rawio=self, unit_index=unit_index,
                                                block_index=block_index, seg_index=seg_index)

            if not lazy:
                # ... and get the real SpikeTrain if not lazy
                sptr = sptr.load(time_slice=time_slice, strict_slicing=strict_slicing,
                                        load_waveforms=load_waveforms)
                # TODO magnitude_mode='rescaled'/'raw'

            sptr.segment = seg
            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if event_channels['type'][chan_ind] == b'event':
                e = EventProxy(rawio=self, event_channel_index=chan_ind,
                                        block_index=block_index, seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice, strict_slicing=strict_slicing)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = EpochProxy(rawio=self, event_channel_index=chan_ind,
                                        block_index=block_index, seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice, strict_slicing=strict_slicing)
                e.segment = seg
                seg.epochs.append(e)

        seg.create_many_to_one_relationship()
        return seg
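
The proxy objects above defer all reading: constructing a proxy is cheap, and .load() pulls only the requested data. A toy stand-in (ArrayProxy is invented, not neo's class) showing the same recipe-then-load pattern:

import numpy as np

class ArrayProxy:
    """Invented stand-in for AnalogSignalProxy: holds a recipe, not data."""
    def __init__(self, path, shape):
        self.path, self.shape = path, shape  # cheap: no I/O here

    def load(self, i_start=None, i_stop=None):
        data = np.memmap(self.path, dtype='f4', shape=self.shape, mode='r')
        return np.array(data[i_start:i_stop])  # read only the slice

# build a small binary file so the sketch is runnable
np.arange(10, dtype='f4').tofile('demo.raw')
proxy = ArrayProxy('demo.raw', (10,))   # no data loaded yet
print(proxy.load(2, 5))                 # [2. 3. 4.]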
Example No. 34
    def read_segment(self,
                     block_index=0,
                     seg_index=0,
                     lazy=False,
                     signal_group_mode=None,
                     load_waveforms=False,
                     time_slice=None):
        """
        :param block_index: int, default 0. If there are several blocks, block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default. 

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls how channels are grouped into AnalogSignal objects.
            * 'split-all': each channel gives its own AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped into
            a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is loaded or left as None.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All AnalogSignal, SpikeTrain, Event and Epoch objects are loaded only within the slice.
        """

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(
            self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # keep only a slice of objects limited by time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time_slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(
                t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(block_index, seg_index,
                                                      channel_indexes) * pq.s

                sig_size = self.get_signal_size(
                    block_index=block_index,
                    seg_index=seg_index,
                    channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice: compute i_start, i_stop and the new sig_t_start
                    if t_stop is not None:
                        i_stop = int(
                            (t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int(
                            (t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(
                        block_index=block_index,
                        seg_index=seg_index,
                        i_start=i_start,
                        i_stop=i_stop,
                        channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within,
                        ind_abs) in self._make_signal_channel_subgroups(
                            channel_indexes,
                            signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case per-channel annotations are OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index][
                            'segments'][seg_index]['signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][
                                chan_index]
                    else:
                        # when channels are grouped by the same unit,
                        # annotations are empty...
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]),
                                              units=units,
                                              copy=False,
                                              sampling_rate=sr,
                                              t_start=sig_t_start,
                                              **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within],
                                              units=units,
                                              copy=False,
                                              sampling_rate=sr,
                                              t_start=sig_t_start,
                                              **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(
                    block_index=block_index,
                    seg_index=seg_index,
                    unit_index=unit_index,
                    t_start=t_start_,
                    t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(
                    raw_waveforms, dtype='float32', unit_index=unit_index)
                wf_units = ensure_signal_units(
                    unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms,
                                        units=wf_units,
                                        dtype='float32',
                                        copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][
                    unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(
                        wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][
                seg_index]['units'][unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(
                    block_index=block_index,
                    seg_index=seg_index,
                    unit_index=unit_index,
                    t_start=t_start_,
                    t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(
                    spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times,
                                  units='s',
                                  copy=False,
                                  t_start=seg_t_start,
                                  t_stop=seg_t_stop,
                                  waveforms=waveforms,
                                  left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate,
                                  **annotations)
            else:
                nb = self.spike_count(block_index=block_index,
                                      seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]),
                                  units='s',
                                  copy=False,
                                  t_start=seg_t_start,
                                  t_stop=seg_t_stop,
                                  **annotations)
                sptr.lazy_shape = (nb, )

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index,
                    event_channel_index=chan_ind,
                    t_start=t_start_,
                    t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(
                    ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(
                        ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index,
                                      seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb, )
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][
                seg_index]['events'][chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times,
                          labels=ev_labels,
                          units='s',
                          copy=False,
                          **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times,
                          durations=ev_durations,
                          labels=ev_labels,
                          units='s',
                          copy=False,
                          **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
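
Time slicing reduces to index arithmetic against the signal's own t_start and sampling rate, exactly as in the i_start/i_stop computation above. A standalone numeric sketch:

import quantities as pq

sr = 1000. * pq.Hz
sig_t_start = 0. * pq.s
sig_size = 5000

t_start, t_stop = 1.0 * pq.s, 2.5 * pq.s
i_start = max(int((t_start - sig_t_start).magnitude * sr.magnitude), 0)
i_stop = min(int((t_stop - sig_t_start).magnitude * sr.magnitude), sig_size)
new_t_start = sig_t_start + (i_start / sr).rescale('s')
print(i_start, i_stop, new_t_start)  # 1000 2500 1.0 s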
Example No. 35
    def read_segment(self, block_index=0, seg_index=0, lazy=False,
                     signal_group_mode=None, load_waveforms=False, time_slice=None):
        """
        :param block_index: int, default 0. If there are several blocks, block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls how channels are grouped into AnalogSignal objects.
            * 'split-all': each channel gives its own AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped into
            a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is loaded or left as None.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All AnalogSignal, SpikeTrain, Event and Epoch objects are loaded only within the slice.
        """

        if lazy:
            warnings.warn(
                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
                DeprecationWarning)

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # keep only a slice of objects limited by time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time_slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(
                    block_index, seg_index, channel_indexes) * pq.s

                sig_size = self.get_signal_size(block_index=block_index, seg_index=seg_index,
                                                channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice: compute i_start, i_stop and the new sig_t_start
                    if t_stop is not None:
                        i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(block_index=block_index,
                                                             seg_index=seg_index, i_start=i_start,
                                                             i_stop=i_stop,
                                                             channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                        channel_indexes,
                        signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case per-channel annotations are OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
                            'signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][chan_index]
                    else:
                        # when channels are grouped by the same unit,
                        # annotations have channel_names and channel_ids arrays;
                        # this will move into array annotations soon
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                        annotations['channel_names'] = signal_channels[ind_abs]['name']
                        annotations['channel_ids'] = signal_channels[ind_abs]['id']
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]), units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within], units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
                                                             seg_index=seg_index,
                                                             unit_index=unit_index,
                                                             t_start=t_start_, t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(raw_waveforms, dtype='float32',
                                                                  unit_index=unit_index)
                wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms, units=wf_units,
                                        dtype='float32', copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
                unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(block_index=block_index,
                                                            seg_index=seg_index,
                                                            unit_index=unit_index,
                                                            t_start=t_start_, t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times, units='s', copy=False,
                                  t_start=seg_t_start, t_stop=seg_t_stop,
                                  waveforms=waveforms, left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate, **annotations)
            else:
                nb = self.spike_count(block_index=block_index, seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]), units='s', copy=False, t_start=seg_t_start,
                                  t_stop=seg_t_stop, **annotations)
                sptr.lazy_shape = (nb,)

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index, event_channel_index=chan_ind,
                    t_start=t_start_, t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index, seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb,)
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][
                chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times, labels=ev_labels, units='s', copy=False, **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times, durations=ev_durations, labels=ev_labels,
                          units='s', copy=False, **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
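
The 'group-by-same-units' mode bundles channels whose units match into one 2D AnalogSignal. A toy sketch of that grouping (the channel table is invented):

import numpy as np

# invented channel table: (name, units)
channels = np.array([('ch0', 'uV'), ('ch1', 'uV'), ('ch2', 'pA')],
                    dtype=[('name', 'U8'), ('units', 'U8')])

groups = {}
for ind, chan in enumerate(channels):
    groups.setdefault(str(chan['units']), []).append(ind)

print(groups)  # {'uV': [0, 1], 'pA': [2]} -> one 2D signal + one 1D signal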
Example No. 36
    def read_segment(self, lazy=False, cascade=True):

        ## Read header file

        f = open(self.filename + '.ent', 'rU')
        #version
        version = f.readline()
        if version[:2] != 'V2' and version[:2] != 'V3':
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # strangely, the datetime is spread over 2 lines
        #line1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        #line2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except Exception:
            fulldatetime = None

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            elan_version=version,
            info1=info1,
            info2=info2,
            rec_datetime=fulldatetime,
        )

        if not cascade: return seg

        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling interval -> sampling rate
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:, c] - min_logic[c]) / \
                      (max_logic[c] - min_logic[c]) * \
                      (max_physic[c] - min_physic[c]) + min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            anaSig = AnalogSignal(sig * unit,
                                  sampling_rate=sampling_rate,
                                  t_start=0. * pq.s,
                                  name=labels[c],
                                  channel_index=c)
            if lazy:
                anaSig.lazy_shape = data.shape[0]
            anaSig.annotate(channel_name=labels[c])
            seg.analogsignals.append(anaSig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = EventArray(
            times=times,
            labels=labels,
            reject_codes=reject_codes,
        )
        if lazy:
            ea.lazy_shape = len(times)
        seg.eventarrays.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
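The Elan reader above rescales raw integer samples to physical units with a per-channel linear map. A self-contained sketch of that formula (all bounds are invented; in the reader they come from the .ent header):

import numpy as np

def logic_to_physic(raw, min_logic, max_logic, min_physic, max_physic):
    # linearly map [min_logic, max_logic] onto [min_physic, max_physic]
    scale = (max_physic - min_physic) / (max_logic - min_logic)
    return (raw - min_logic) * scale + min_physic

raw = np.array([-32768, 0, 32767], dtype='f4')
print(logic_to_physic(raw, -32768., 32767., -10., 10.))  # ~[-10, 0, 10]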
Ejemplo n.º 37
0
    def read_segment(self, lazy=False, cascade=True):

        ## Read header file

        f = open(self.filename + '.ent', 'r')
        # version
        version = f.readline()
        if version[:2] not in ('V2', 'V3'):
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # the datetime is spread over two oddly formatted lines
        # line 1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        # line 2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except (TypeError, ValueError):
            fulldatetime = None

        seg = Segment(file_origin=os.path.basename(self.filename),
                      elan_version=version,
                      info1=info1,
                      info2=info2,
                      rec_datetime=fulldatetime)

        if not cascade:
            return seg

        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling period, inverted to give the sampling rate
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:, c] - min_logic[c]) / (
                    max_logic[c] - min_logic[c]) * \
                    (max_physic[c] - min_physic[c]) + min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            ana_sig = AnalogSignal(
                sig * unit, sampling_rate=sampling_rate,
                t_start=0. * pq.s, name=labels[c], channel_index=c)
            if lazy:
                ana_sig.lazy_shape = data.shape[0]
            ana_sig.annotate(channel_name=labels[c])
            seg.analogsignals.append(ana_sig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = Event(times=times, labels=labels, reject_codes=reject_codes)
        if lazy:
            ea.lazy_shape = len(times)
        seg.events.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
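Both Elan variants above parse the recording datetime by trying three regular expressions per header line, from most to least specific. A compact, standalone version of that fallback (the example strings are invented):

import re
import datetime

def parse_ent_datetime(line1, line2):
    YY = MM = DD = hh = mm = ss = None
    for l in (line1, line2):
        full = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        time_only = re.findall(r'(\d+):(\d+):(\d+)', l)
        date_only = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if full:
            DD, MM, YY, hh, mm, ss = full[0]
        elif time_only:
            hh, mm, ss = time_only[0]
        elif date_only:
            DD, MM, YY = date_only[0]
    try:
        return datetime.datetime(int(YY), int(MM), int(DD),
                                 int(hh), int(mm), int(ss))
    except (TypeError, ValueError):
        return None

print(parse_ent_datetime('21-03-2015', '10:42:05'))  # 2015-03-21 10:42:05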
Ejemplo n.º 38
0
    def read_segment(self, lazy=False, cascade=True):
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if not cascade:
            return seg

        fid = open(self.filename, 'rb')

        headertext = fid.read(2048)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line: continue
            #print '#' , line , '#'
            key, val = line.split('=')
            if key in ['NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ']:
                val = int(val)
            elif key in ['AD', 'DT']:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        if not lazy:
            data = np.memmap(
                self.filename,
                np.dtype('i2'),
                'r',
                # shape = (header['NC'], header['NP']),
                shape=(header['NP'] // header['NC'], header['NC']),
                offset=header['NBH'])

        for c in range(header['NC']):

            YCF = float(header['YCF%d' % c].replace(',', '.'))
            YAG = float(header['YAG%d' % c].replace(',', '.'))
            YZ = float(header['YZ%d' % c].replace(',', '.'))

            ADCMAX = header['ADCMAX']
            AD = header['AD']
            DT = header['DT']

            if header.get('TU') == 'ms':
                DT *= .001

            unit = header['YU%d' % c]
            try:
                unit = pq.Quantity(1., unit)
            except Exception:
                unit = pq.Quantity(1., '')

            if lazy:
                signal = [] * unit
            else:
                signal = (data[:, header['YO%d' % c]].astype('f4') -
                          YZ) * AD / (YCF * YAG * (ADCMAX + 1)) * unit

            ana = AnalogSignal(signal,
                               sampling_rate=pq.Hz / DT,
                               t_start=0. * pq.s,
                               name=header['YN%d' % c],
                               channel_index=c)
            if lazy:
                ana.lazy_shape = header['NP'] // header['NC']

            seg.analogsignals.append(ana)

        seg.create_many_to_one_relationship()
        return seg
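The reader above first parses a plain-text 'KEY=value' header, coercing known integer and decimal-comma fields. A standalone sketch of just that parsing step on a fabricated header block:

headertext = 'NC=2\r\nNP=8\r\nAD=10,0\r\nDT=0,0001\r\nTU=ms\r\n'
header = {}
for line in headertext.split('\r\n'):
    if '=' not in line:
        continue
    key, val = line.split('=')
    if key in ['NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ']:
        val = int(val)                      # integer fields
    elif key in ['AD', 'DT']:
        val = float(val.replace(',', '.'))  # decimal comma -> point
    header[key] = val
print(header)  # {'NC': 2, 'NP': 8, 'AD': 10.0, 'DT': 0.0001, 'TU': 'ms'}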
Ejemplo n.º 39
0
    def read_segment(
        self,
        # the first 2 keyword arguments are imposed by the neo.io API
        lazy=False,
        cascade=True,
        # all following arguments are decided by this IO and are free
        segment_duration=15.,
        num_analogsignal=4,
        num_spiketrain_by_channel=3,
    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        By default this IO reads a Segment.

        This is just an example to be adapted to each ClassIO.
        These 3 parameters are taken into account because this function
        returns a generated segment with fake AnalogSignal and fake
        SpikeTrain objects.

        Parameters:
            segment_duration : the duration in seconds of the segment.
            num_analogsignal : number of AnalogSignal objects in this segment
            num_spiketrain_by_channel : number of SpikeTrain objects per channel

        """

        sampling_rate = 10000.  #Hz
        t_start = -1.

        #time vector for generated signal
        timevect = np.arange(t_start, t_start + segment_duration,
                             1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy,
                                             cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrain
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(
                        lazy=lazy,
                        cascade=cascade,
                        segment_duration=segment_duration,
                        t_start=t_start,
                        channel_index=i)
                    seg.spiketrains += [sptr]

            # create an Event that mimics triggers.
            # note that ExampleIO does not allow direct access to Events;
            # for that you need read_segment(cascade=True)

            if lazy:
                # in the lazy case no data are read and eva is empty
                eva = Event()
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as unit
                eva = Event(
                    timevect[(np.random.rand(n) * timevect.size).astype('i')] *
                    pq.s)
                # all durations are the same; note that neo's Event has no
                # durations attribute, so an Epoch may be what is intended here
                eva.durations = np.ones(n) * 500 * pq.ms
                # labels
                l = []
                for i in range(n):
                    if np.random.rand() > .6:
                        l.append('TriggerA')
                    else:
                        l.append('TriggerB')
                eva.labels = np.array(l)

            seg.events += [eva]

        seg.create_many_to_one_relationship()
        return seg
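Hypothetical usage of the fake reader above, assuming a neo version that still ships ExampleIO with these keyword arguments (the filename is ignored by this IO, so any string works):

from neo.io import ExampleIO

io = ExampleIO(filename='whatever.fake')
seg = io.read_segment(segment_duration=5., num_analogsignal=2,
                      num_spiketrain_by_channel=2)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))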
    def read_segment(self, take_ideal_sampling_rate=False,
                     lazy=False, cascade=True):
        """
        Arguments:
        """

        header = self.read_header(filename=self.filename)

        # ~ print header
        fid = open(self.filename, 'rb')

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            ced_version=str(header.system_id),
        )

        if not cascade:
            fid.close()
            return seg

        def addannotations(ob, channelHeader):
            ob.annotate(title=channelHeader.title)
            ob.annotate(physical_channel_index=channelHeader.phy_chan)
            ob.annotate(comment=channelHeader.comment)

        for i in range(header.channels):
            channelHeader = header.channelHeaders[i]

            #~ print 'channel' , i , 'kind' ,  channelHeader.kind

            # channels with kind 0 are empty and are skipped by the tests below

            if channelHeader.kind in [1, 9]:
                # analog channel
                ana_sigs = self.read_one_channel_continuous(
                    fid, i, header, take_ideal_sampling_rate, lazy=lazy)
                for anaSig in ana_sigs:
                    addannotations(anaSig, channelHeader)
                    anaSig.name = str(anaSig.annotations['title'])
                    seg.analogsignals.append(anaSig)

            elif channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = self.read_one_channel_event_or_spike(
                    fid, i, header, lazy=lazy)
                if ea is not None:
                    addannotations(ea, channelHeader)
                    seg.events.append(ea)

            elif channelHeader.kind in [6, 7]:
                sptrs = self.read_one_channel_event_or_spike(
                    fid, i, header, lazy=lazy)
                if sptrs is not None:
                    for sptr in sptrs:
                        addannotations(sptr, channelHeader)
                        seg.spiketrains.append(sptr)

        fid.close()

        seg.create_many_to_one_relationship()
        return seg
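In the CED reader above, channelHeader.kind decides how each channel is turned into a neo object. A small data-structure sketch of that dispatch (the kind codes are the ones tested above; the category names are placeholders):

KIND_DISPATCH = {
    1: 'analogsignal', 9: 'analogsignal',      # continuous channels
    2: 'event', 3: 'event', 4: 'event',
    5: 'event', 8: 'event',                    # event/marker channels
    6: 'spiketrain', 7: 'spiketrain',          # spike channels
}

def classify(kind):
    # kind 0 and any unknown kind are simply skipped by the reader
    return KIND_DISPATCH.get(kind, 'ignored')

print(classify(1), classify(5), classify(7), classify(0))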
Ejemplo n.º 41
0
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        # ~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(
                fid, EntityHeader).read_f(offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename,
                                            np.dtype('i4'),
                                            'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype(
                        'f8') / global_header['freq'] * pq.s
                sptr = SpikeTrain(times=spike_times,
                                  t_start=global_header['tbeg'] /
                                  global_header['freq'] * pq.s,
                                  t_stop=global_header['tend'] /
                                  global_header['freq'] * pq.s,
                                  name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename,
                                            np.dtype('i4'),
                                            'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype(
                        'f8') / global_header['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times,
                             labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename,
                                            np.dtype('i4'),
                                            'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype(
                        'f8') / global_header['freq'] * pq.s
                    stop_times = np.memmap(self.filename,
                                           np.dtype('i4'),
                                           'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype(
                        'f') / global_header['freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and wavefoms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename,
                                            np.dtype('i4'),
                                            'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype(
                        'f8') / global_header['freq'] * pq.s

                    waveforms = np.memmap(self.filename,
                                          np.dtype('i2'),
                                          'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (
                        waveforms.astype('f') * entity_header['ADtoMV'] +
                        entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] / global_header['freq'] *
                    pq.s,
                    #~ t_stop = max(globalHeader['tend']/
                    #~ globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop,
                    name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename,
                                       np.dtype('i4'),
                                       'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']

                fragment_starts_offset = entity_header[
                    'offset'] + entity_header['n'] * 4
                fragment_starts = np.memmap(self.filename,
                                            np.dtype('i4'),
                                            'r',
                                            shape=(entity_header['n']),
                                            offset=fragment_starts_offset)
                fragment_starts = fragment_starts.astype(
                    'f8') / global_header['freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal_offset = fragment_starts_offset + entity_header[
                        'n'] * 4
                    signal = np.memmap(self.filename,
                                       np.dtype('i2'),
                                       'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=signal_offset)
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal,
                    t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename,
                                      np.dtype('i4'),
                                      'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    markertype = fid.read(64).replace(b'\x00', b'').decode()
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entity_header['MarkerLength'])),
                        'r',
                        shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
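Every entity type in the .nex reader above follows the same pattern: memmap a block of int32 clock ticks at the entity's offset, then divide by the global clock frequency. A standalone sketch of that pattern against a throwaway file (the offsets and frequency are invented):

import os
import tempfile
import numpy as np

freq = 40000.0                                    # stand-in for global_header['freq']
ticks = np.array([400, 4000, 40000], dtype='i4')  # raw timestamps in clock ticks

path = os.path.join(tempfile.mkdtemp(), 'fake.nex')
ticks.tofile(path)

stamps = np.memmap(path, np.dtype('i4'), 'r', shape=(3,), offset=0)
times_s = stamps.astype('f8') / freq              # ticks -> seconds
print(times_s)  # [0.01 0.1  1.  ]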
Ejemplo n.º 42
0
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        usecols=None,
        skiprows=0,
        timecolumn=None,
        sampling_rate=1. * pq.Hz,
        t_start=0. * pq.s,
        unit=pq.V,
        method='genfromtxt',
    ):
        """
        Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            usecols : if None take all columns, otherwise a list of the selected columns
            skiprows : skip the n first lines in case they contain header information
            timecolumn : None, or a valid int pointing to the time vector column
            sampling_rate : the sampling rate of the signals; ignored when timecolumn is not None
            t_start : time of the first sample
            unit : unit of the AnalogSignal; can be a str or directly a Quantity

            method : 'genfromtxt', 'csv' or 'homemade'
                        in case of bugs you can try one of these methods

                        'genfromtxt' uses numpy.genfromtxt
                        'csv' uses the csv module
                        'homemade' uses an intuitive, more robust but slow method

        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        if isinstance(sampling_rate, (float, int)):
            # if not a quantity, Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (float, int)):
            # if not a quantity, seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        # loadtxt
        if method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=delimiter,
                                usecols=usecols,
                                skip_header=skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif method == 'csv':
            tab = [l for l in csv.reader(open(self.filename, 'r'),
                                         delimiter=delimiter)]
            tab = tab[skiprows:]
            sig = np.array(tab, dtype='f')
        elif method == 'homemade':
            fid = open(self.filename, 'r')
            for l in range(skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                l = line.split(delimiter)
                while '' in l:
                    l.remove('')
                tab.append(l)
            sig = np.array(tab, dtype='f')

        if timecolumn is not None:
            sampling_rate = 1. / np.mean(np.diff(sig[:, timecolumn])) * pq.Hz
            t_start = sig[0, timecolumn] * pq.s

        for i in range(sig.shape[1]):
            if timecolumn == i:
                continue
            if usecols is not None and i not in usecols:
                continue

            if lazy:
                signal = [] * unit
            else:
                signal = sig[:, i] * unit

            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  t_start=t_start,
                                  channel_index=i,
                                  name='Column %d' % i)
            if lazy:
                anaSig.lazy_shape = sig.shape
            seg.analogsignals.append(anaSig)

        seg.create_many_to_one_relationship()
        return seg
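Hypothetical usage of the ASCII reader above (an AsciiSignalIO-style class); the file name and column layout are invented for illustration:

import quantities as pq
from neo.io import AsciiSignalIO

io = AsciiSignalIO(filename='signals.txt')
seg = io.read_segment(delimiter='\t', skiprows=1, timecolumn=0,
                      unit=pq.mV, method='genfromtxt')
for sig in seg.analogsignals:
    print(sig.name, sig.sampling_rate)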
Ejemplo n.º 43
0
    def read_segment(self,
                     lazy=False,
                     delimiter='\t',
                     usecols=None,
                     skiprows=0,
                     timecolumn=None,
                     sampling_rate=1. * pq.Hz,
                     t_start=0. * pq.s,
                     unit=pq.V,
                     method='genfromtxt',
                     ):
        """
        Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            usecols : if None take all columns, otherwise a list of the selected columns
            skiprows : skip the n first lines in case they contain header information
            timecolumn : None, or a valid int pointing to the time vector column
            sampling_rate : the sampling rate of the signals; ignored when timecolumn is not None
            t_start : time of the first sample
            unit : unit of the AnalogSignal; can be a str or directly a Quantity

            method : 'genfromtxt', 'csv' or 'homemade'
                        in case of bugs you can try one of these methods

                        'genfromtxt' uses numpy.genfromtxt
                        'csv' uses the csv module
                        'homemade' uses an intuitive, more robust but slow method

        """
        assert not lazy, 'This IO does not support lazy reading'

        seg = Segment(file_origin=os.path.basename(self.filename))

        if isinstance(sampling_rate, (float, int)):
            # if not a quantity, Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (float, int)):
            # if not a quantity, seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        # loadtxt
        if method == 'genfromtxt':
            sig = np.genfromtxt(self.filename,
                                delimiter=delimiter,
                                usecols=usecols,
                                skip_header=skiprows,
                                dtype='f')
            if len(sig.shape) == 1:
                sig = sig[:, np.newaxis]
        elif method == 'csv':
            tab = [l for l in csv.reader(open(self.filename, 'r'), delimiter=delimiter)]
            tab = tab[skiprows:]
            sig = np.array(tab, dtype='f')
        elif method == 'homemade':
            fid = open(self.filename, 'r')
            for l in range(skiprows):
                fid.readline()
            tab = []
            for line in fid.readlines():
                line = line.replace('\r', '')
                line = line.replace('\n', '')
                l = line.split(delimiter)
                while '' in l:
                    l.remove('')
                tab.append(l)
            sig = np.array(tab, dtype='f')

        if timecolumn is not None:
            sampling_rate = 1. / np.mean(np.diff(sig[:, timecolumn])) * pq.Hz
            t_start = sig[0, timecolumn] * pq.s

        for i in range(sig.shape[1]):
            if timecolumn == i:
                continue
            if usecols is not None and i not in usecols:
                continue

            signal = sig[:, i] * unit

            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                  t_start=t_start, channel_index=i,
                                  name='Column %d' % i)

            seg.analogsignals.append(anaSig)

        seg.create_many_to_one_relationship()
        return seg
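When timecolumn is given, both ASCII variants above infer the rate from successive time stamps rather than trusting the sampling_rate argument. The same computation on a synthetic time column:

import numpy as np
import quantities as pq

t = np.arange(0., 1., 0.001)                 # a 1 kHz time column, in seconds
sampling_rate = 1. / np.mean(np.diff(t)) * pq.Hz
t_start = t[0] * pq.s
print(sampling_rate, t_start)                # ~1000.0 Hz, 0.0 s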
Ejemplo n.º 44
0
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : whether to load the spike waveforms (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(
                channelHeader['Template']).reshape((5, 64))
            channelHeader['Boxes'] = np.array(
                channelHeader['Boxes']).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        # integer counters/positions so they can be used as array indices later
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i8')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i8')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        # spike times and waveforms
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

        # events
        nb_events = {}
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                      dataBlockHeader['NumberOfWordsInWaveform'])
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                # spike
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal: keep the first data block's timestamp as
                # t_start (checked before the sample counter is incremented,
                # and converted to seconds since t_starts is used as seconds)
                if nb_samples[chan] == 0:
                    t_starts[chan] = time / globalHeader['ADFrequency']
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2

        ## Step 3 : allocate memory, then run a second reading loop if not lazy
        if not lazy:
            # allocating memory for signals
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating memory for SpikeTrains
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit],
                                                   dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros(
                        (nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

            # allocating memory for events
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                    offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = (dataBlockHeader['NumberOfWaveforms'],
                          dataBlockHeader['NumberOfWordsInWaveform'])
                time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32
                        + dataBlockHeader['TimeStamp'])
                time /= globalHeader['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.frombuffer(
                            fid.read(n1 * n2 * 2),
                            dtype='i2').reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1

                elif dataBlockHeader['Type'] == 5:
                    # signal
                    data = np.frombuffer(fid.read(n2 * 2),
                                         dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:
                                    sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4 : create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times * pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if globalHeader['Version'] == 100 or \
                        globalHeader['Version'] == 101:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain']
                                    * 1000.)
                elif globalHeader['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain']
                                    * slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV'] / (
                        .5 * (2 ** globalHeader['BitsPerSpikeSample'])
                        * slowChannelHeaders[chan]['Gain']
                        * slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]['Channel'],
                channel_name=slowChannelHeaders[chan]['Name'])
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] < 103:
                        gain = 3000. / (2048 * dspChannelHeaders[chan]['Gain']
                                        * 1000.)
                    elif 103 <= globalHeader['Version'] < 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** globalHeader['BitsPerSpikeSample']
                            * 1000.)
                    elif globalHeader['Version'] > 105:
                        gain = globalHeader['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** globalHeader['BitsPerSpikeSample']
                            * globalHeader['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s',
                t_stop=t_stop * pq.s,
                waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
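The Plexon reader above is a classic two-pass design: pass 1 only counts items per (channel, unit) so that arrays can be preallocated, pass 2 fills them in place. A minimal sketch of the same pattern over an in-memory list of fake blocks:

import numpy as np

blocks = [(0, 0, 1.0), (0, 1, 2.0), (0, 0, 3.5)]   # (chan, unit, time) stand-ins

# pass 1: count items per (chan, unit)
nb_spikes = np.zeros((1, 2), dtype='i')
for chan, unit, _ in blocks:
    nb_spikes[chan, unit] += 1

# allocate once, then pass 2: fill
times = {idx: np.zeros(n, dtype='f') for idx, n in np.ndenumerate(nb_spikes)}
pos = np.zeros_like(nb_spikes)
for chan, unit, t in blocks:
    times[(chan, unit)][pos[chan, unit]] = t
    pos[chan, unit] += 1
print(times[(0, 0)], times[(0, 1)])  # [1.  3.5] [2.]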
    def read_segment(self, cascade=True, lazy=False):
        """
        Arguments:
        """
        f = StructFile(open(self.filename, 'rb'))

        # Name
        f.seek(64, 0)
        surname = f.read(22).decode('ascii').rstrip(' ')
        firstname = f.read(20).decode('ascii').rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=str(firstname + ' ' + surname),
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            f.close()
            return seg

        # area
        f.seek(176, 0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
                      'IMPED_E', 'MONTAGE',
                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
                      'EVENT B', 'TRIGGER']
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((-1, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.frombuffer(f.read(Num_Chan * 2), dtype='u2', count=Num_Chan)

        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
                 101: pq.dimensionless, 102: pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip(b"\x00").decode('ascii')
            ground = f.read(6).strip(b"\x00").decode('ascii')
            (logical_min, logical_max, logical_ground, physical_min,
             physical_max) = f.read_f('iiiii')
            k, = f.read_f('h')
            unit = units.get(k, pq.uV)

            f.seek(8, 1)
            sampling_rate, = f.read_f('H')
            sampling_rate = sampling_rate * Rate_Min * pq.Hz

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max - physical_min) / float(
                    logical_max - logical_min + 1)
                signal = (rawdata[:, c].astype(
                    'f') - logical_ground) * factor * unit

            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   name=str(label), channel_index=c)
            if lazy:
                ana_sig.lazy_shape = None
            ana_sig.annotate(ground=ground)

            seg.analogsignals.append(ana_sig)

        sampling_rate = np.mean(
            [ana_sig.sampling_rate for ana_sig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(f.read(length),
                                     dtype=[('pos', 'u4'),
                                            ('label', label_dtype)])
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (
                    triggers['pos'] != 0)
                triggers = triggers[keep]
                ea = Event(name=zname[0] + zname[1:].lower(),
                           labels=triggers['label'].astype('S'),
                           times=(triggers['pos'] / sampling_rate).rescale('s'))
            else:
                ea = Event(name=zname[0] + zname[1:].lower())
                ea.lazy_shape = triggers.size
            seg.events.append(ea)

        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[('label', 'u4'), ('start', 'u4'),
                                          ('stop', 'u4'), ])
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (
                    epochs['stop'] < rawdata.shape[0])
                epochs = epochs[keep]
                ep = Epoch(name=zname[0] + zname[1:].lower(),
                           labels=epochs['label'].astype('S'),
                           times=(epochs['start'] / sampling_rate).rescale('s'),
                           durations=((epochs['stop'] - epochs['start'])
                                      / sampling_rate).rescale('s'))
            else:
                ep = Epoch(name=zname[0] + zname[1:].lower())
                ep.lazy_shape = epochs.size
            seg.epochs.append(ep)

        seg.create_many_to_one_relationship()
        f.close()
        return seg
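The Micromed reader above converts raw unsigned samples with a per-channel factor derived from the logical and physical ranges. A standalone sketch of that conversion (all bounds are invented):

import numpy as np

logical_min, logical_max, logical_ground = 0, 65535, 32768
physical_min, physical_max = -3200, 3200

factor = float(physical_max - physical_min) / float(logical_max - logical_min + 1)
raw = np.array([0, 32768, 65535], dtype='u2')
signal = (raw.astype('f') - logical_ground) * factor
print(signal)  # approximately [-3200, 0, 3200) in the channel's unit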
Ejemplo n.º 46
0
    def read_segment(self,
                     import_neuroshare_segment=True,
                     lazy=False,
                     cascade=True):
        """
        Arguments:
            import_neuroshare_segment : import a neuroshare "segment" entity as a SpikeTrain with associated waveforms, or skip it entirely.

        """
        seg = Segment(file_origin=os.path.basename(self.filename), )

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)

        #elif sys.platform.startswith('darwin'):

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg

        # open file
        hFile = ctypes.c_uint32(0)
        # note: ctypes.c_char_p needs bytes under Python 3
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename.encode('ascii')),
                               ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                                  ctypes.sizeof(fileinfo))

        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                        ctypes.byref(entityInfo),
                                        ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                           ctypes.byref(pEventInfo),
                                           ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  # TEXT
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(
                        pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel), )
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.events.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                            ctypes.byref(pAnalogInfo),
                                            ctypes.sizeof(pAnalogInfo))
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [] * pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    total_read = 0
                    while total_read < entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount -
                                                      total_read)

                        neuroshare.ns_GetAnalogData(
                            hFile, dwEntityID, dwStartIndex, dwStopIndex,
                            ctypes.byref(pdwContCount),
                            pData[total_read:].ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value

                    signal = pq.Quantity(pData,
                                         units=pAnalogInfo.szUnits,
                                         copy=False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                             ctypes.byref(pdTime))

                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                    t_start=pdTime.value * pq.s,
                    name=str(entityInfo.szEntityLabel),
                )
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append(anaSig)

            # segment
            if (entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT'
                    and import_neuroshare_segment):

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                             ctypes.byref(pdwSegmentInfo),
                                             ctypes.sizeof(pdwSegmentInfo))
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo(
                        hFile, dwEntityID, dwSourceID,
                        ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

                if lazy:
                    # times are not read in lazy mode: start from an empty train
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
                    pData = np.zeros((dwDataBufferSize), dtype='float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID = ctypes.c_uint32(0)

                    nsample = int(dwDataBufferSize)
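                    # note: pData holds dwMaxSampleCount * dwSourceCount values,
                    # so the reshape below is consistent only when nsource == 1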
                    times = np.empty((entityInfo.dwItemCount), dtype='f')
                    waveforms = np.empty(
                        (entityInfo.dwItemCount, nsource, nsample), dtype='f')
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetSegmentData(
                            hFile, dwEntityID, dwIndex,
                            ctypes.byref(pdTimeStamp),
                            pData.ctypes.data_as(
                                ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                            ctypes.byref(pdwUnitID))

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[
                            dwIndex, :, :] = pData[:nsample * nsource].reshape(
                                nsample, nsource).transpose()

                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(
                                                  pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. /
                        float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) *
                        pq.Hz,
                        name=str(entityInfo.szEntityLabel),
                    )
                seg.spiketrains.append(sptr)

            # neuralevent
            if entity_types[
                    entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                            ctypes.byref(pNeuralInfo),
                                            ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [] * pq.s
                    t_stop = 0 * pq.s
                else:
                    pData = np.zeros((entityInfo.dwItemCount, ),
                                     dtype='float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData(
                        hFile, dwEntityID, dwStartIndex, dwIndexCount,
                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData * pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(
                    times,
                    t_stop=t_stop,
                    name=str(entityInfo.szEntityLabel),
                )
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
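A minimal usage sketch for the reader above (hedged: the class name NeuroshareIO matches neo's IO for this format, but the file and DLL paths below are invented for illustration):

# usage sketch; file and library names are assumptions
from neo.io import NeuroshareIO

io = NeuroshareIO(filename='data.mcd', dllname='nsMCDLibrary.so')
seg = io.read_segment(import_neuroshare_segment=True, lazy=False)
print(len(seg.analogsignals), len(seg.spiketrains))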
def generate_one_simple_segment(seg_name='segment 0',
                                supported_objects=[],
                                nb_analogsignal=4,
                                t_start=0.*pq.s,
                                sampling_rate=10*pq.kHz,
                                duration=6.*pq.s,

                                nb_spiketrain=6,
                                spikerate_range=[.5*pq.Hz, 12*pq.Hz],

                                event_types={'stim': ['a', 'b',
                                                      'c', 'd'],
                                             'enter_zone': ['one',
                                                            'two'],
                                             'color': ['black',
                                                       'yellow',
                                                       'green'],
                                             },
                                event_size_range=[5, 20],

                                epoch_types={'animal state': ['Sleep',
                                                              'Freeze',
                                                              'Escape'],
                                             'light': ['dark',
                                                       'lighted']
                                             },
                                epoch_duration_range=[.5, 3.],

                                ):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            # simplify units (e.g. kHz * s) before converting to a sample count
            anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)),
                                  sampling_rate=sampling_rate, t_start=t_start,
                                  units=pq.mV, channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand()*np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            spikes = rand(int((spikerate*duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes*duration,
                              t_start=t_start, t_stop=t_start+duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in event_types.items():
            evt_size = rand()*np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size)*len(labels)).astype('i')]
            evt = Event(times=rand(evt_size) * duration, labels=labels,
                        name=name)  # attach the event-type name (was unused)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in epoch_types.items():
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand()*np.diff(epoch_duration_range)
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t+dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times))*len(labels)).astype('i')]
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity([x[0] for x in durations],
                                              units=pq.s),
                        labels=labels,
                        name=name,  # attach the epoch-type name (was unused)
                        )
            seg.epochs.append(epc)

    # TODO: Spike (Event and Epoch are generated above)

    seg.create_many_to_one_relationship()
    return seg
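A short usage sketch for the generator above (assumption: the Neo classes it checks against are importable from neo.core):

# generate a synthetic segment with two signals and three spike trains
from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch

seg = generate_one_simple_segment(
    seg_name='demo',
    supported_objects=[Segment, AnalogSignal, SpikeTrain, Event, Epoch],
    nb_analogsignal=2,
    nb_spiketrain=3)
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))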
Example No. 48
    def read_segment(
        self,
        take_ideal_sampling_rate=False,
        lazy=False,
        cascade=True,
    ):
        """
        Arguments:
        """

        header = self.read_header(filename=self.filename)

        #~ print header
        fid = open(self.filename, 'rb')

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            ced_version=str(header.system_id),
        )

        if not cascade:
            return seg

        def addannotations(ob, channelHeader):
            ob.annotate(title=channelHeader.title)
            ob.annotate(physical_channel_index=channelHeader.phy_chan)
            ob.annotate(comment=channelHeader.comment)

        for i in range(header.channels):
            channelHeader = header.channelHeaders[i]

            if channelHeader.kind in [1, 9]:
                anaSigs = self.readOneChannelContinuous(
                    fid, i, header, take_ideal_sampling_rate, lazy=lazy)
                for anaSig in anaSigs:
                    addannotations(anaSig, channelHeader)
                    anaSig.name = str(anaSig.annotations['title'])
                    seg.analogsignals.append(anaSig)

            elif channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = self.readOneChannelEventOrSpike(fid, i, header, lazy=lazy)
                if ea is not None:
                    for ev in ea:
                        addannotations(ev, channelHeader)
                        seg.eventarrays.append(ev)

            elif channelHeader.kind in [6, 7]:
                sptr = self.readOneChannelEventOrSpike(fid,
                                                       i,
                                                       header,
                                                       lazy=lazy)
                if sptr is not None:
                    for sp in sptr:
                        addannotations(sp, channelHeader)
                        seg.spiketrains.append(sp)

        fid.close()

        seg.create_many_to_one_relationship()
        return seg
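A hedged usage sketch (assumptions: this method belongs to neo.io.Spike2IO and the input is a CED .smr file; both names are illustrative):

# read a CED/Spike2 file into a single Segment
from neo.io import Spike2IO

io = Spike2IO(filename='recording.smr')
seg = io.read_segment(take_ideal_sampling_rate=False, lazy=False)
for sig in seg.analogsignals:
    print(sig.name, sig.sampling_rate)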
Example No. 49
    def read_segment(
        self,
        cascade=True,
        lazy=False,
        sampling_rate=1.0 * pq.Hz,
        t_start=0.0 * pq.s,
        unit=pq.V,
        nbchannel=1,
        bytesoffset=0,
        dtype="f4",
        rangemin=-10,
        rangemax=10,
    ):
        """
        Reading signal in a raw binary interleaved compact file.

        Arguments:
            sampling_rate :  sample rate
            t_start : time of the first sample sample of each channel
            unit: unit of AnalogSignal can be a str or directly a Quantities
            nbchannel : number of channel
            bytesoffset : nb of bytes offset at the start of file

            dtype : dtype of the data
            rangemin , rangemax : if the dtype is integer, range can give in volt the min and the max of the range
        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        dtype = np.dtype(dtype)

        if isinstance(sampling_rate, (float, int)):
            # not a Quantity: assume Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (float, int)):
            # not a Quantity: assume seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        if not lazy:
            sig = np.memmap(self.filename, dtype=dtype, mode="r", offset=bytesoffset)
            if sig.size % nbchannel != 0:
                # drop trailing samples that do not fill a complete frame
                sig = sig[:-(sig.size % nbchannel)]
            sig = sig.reshape((sig.size // nbchannel, nbchannel))
            if dtype.kind == "i":
                sig = sig.astype("f")
                sig /= 2 ** (8 * dtype.itemsize)
                sig *= rangemax - rangemin
                sig += (rangemax + rangemin) / 2.0
            elif dtype.kind == "u":
                sig = sig.astype("f")
                sig /= 2 ** (8 * dtype.itemsize)
                sig *= rangemax - rangemin
                sig += rangemin
            sig_with_units = pq.Quantity(sig, units=unit, copy=False)

        for i in range(nbchannel):
            if lazy:
                signal = [] * unit
            else:
                signal = sig_with_units[:, i]

            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate, t_start=t_start, channel_index=i, copy=False)

            if lazy:
                # TODO
                anaSig.lazy_shape = None
            seg.analogsignals.append(anaSig)

        seg.create_many_to_one_relationship()
        return seg
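A worked check of the integer-rescaling arithmetic above, as standalone NumPy (assumptions: dtype='i2' and the default rangemin=-10, rangemax=10):

import numpy as np

raw = np.array([-32768, 0, 32767], dtype='i2')
sig = raw.astype('f')
sig /= 2 ** (8 * raw.dtype.itemsize)  # scale int16 into [-0.5, 0.5)
sig *= 10 - (-10)                     # spread over the 20 V range
sig += (10 + (-10)) / 2.0             # shift to the midpoint of the range
# sig is now approximately [-10., 0., 10.] volts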
Example No. 50
    def read_segment(self,
                     block_index=0,
                     seg_index=0,
                     lazy=False,
                     signal_group_mode=None,
                     load_waveforms=False,
                     time_slice=None,
                     strict_slicing=True):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depend IO):
        This control behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units' all channel sharing the same quantity units ar grouped in
            a 2D AnalogSignal

        :param load_waveforms: False by default. Control SpikeTrains.waveforms is None or not.

        :param time_slice: None by default means no limit.
            A time slice is (t_start, t_stop) both are quantities.
            All object AnalogSignal, SpikeTrain, Event, Epoch will load only in the slice.

        :param strict_slicing: True by default.
             Control if an error is raise or not when one of  time_slice member (t_start or t_stop)
             is outside the real time range of the segment.
        """

        if lazy:
            assert time_slice is None, \
                'For lazy=True, pass time_slice when calling load() on each proxy'

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(
            self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        # AnalogSignal
        signal_channels = self.header['signal_channels']
        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                for i, (ind_within,
                        ind_abs) in self._make_signal_channel_subgroups(
                            channel_indexes,
                            signal_group_mode=signal_group_mode).items():
                    # make a proxy...
                    anasig = AnalogSignalProxy(rawio=self,
                                               global_channel_indexes=ind_abs,
                                               block_index=block_index,
                                               seg_index=seg_index)

                    if not lazy:
                        # ... and get the real AnalogSignal if not lazy
                        anasig = anasig.load(time_slice=time_slice,
                                             strict_slicing=strict_slicing)
                        # TODO magnitude_mode='rescaled'/'raw'

                    anasig.segment = seg
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            # make a proxy...
            sptr = SpikeTrainProxy(rawio=self,
                                   unit_index=unit_index,
                                   block_index=block_index,
                                   seg_index=seg_index)

            if not lazy:
                # ... and get the real SpikeTrain if not lazy
                sptr = sptr.load(time_slice=time_slice,
                                 strict_slicing=strict_slicing,
                                 load_waveforms=load_waveforms)
                # TODO magnitude_mode='rescaled'/'raw'

            sptr.segment = seg
            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if event_channels['type'][chan_ind] == b'event':
                e = EventProxy(rawio=self,
                               event_channel_index=chan_ind,
                               block_index=block_index,
                               seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice,
                               strict_slicing=strict_slicing)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = EpochProxy(rawio=self,
                               event_channel_index=chan_ind,
                               block_index=block_index,
                               seg_index=seg_index)
                if not lazy:
                    e = e.load(time_slice=time_slice,
                               strict_slicing=strict_slicing)
                e.segment = seg
                seg.epochs.append(e)

        seg.create_many_to_one_relationship()
        return seg
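A sketch of the proxy workflow this method enables (hedged: ExampleIO is neo's synthetic demo reader, used here only as a stand-in for any RawIO-based IO; the time values are arbitrary):

import quantities as pq
from neo.io import ExampleIO

io = ExampleIO('recording.fake')       # ExampleIO synthesizes fake data
seg = io.read_segment(lazy=True)
proxy = seg.analogsignals[0]           # AnalogSignalProxy: nothing loaded yet
sig = proxy.load(time_slice=(1. * pq.s, 2. * pq.s))  # real AnalogSignal slice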
Example No. 51
    def read_segment(self,
                     cascade=True,
                     lazy=False,

                     sampling_rate=1. * pq.Hz,
                     t_start=0. * pq.s,
                     unit=pq.V,

                     nbchannel=1,
                     bytesoffset=0,

                     dtype='f4',
                     rangemin=-10,
                     rangemax=10,
                     ):
        """
        Reading signal in a raw binary interleaved compact file.

        Arguments:
            sampling_rate :  sample rate
            t_start : time of the first sample sample of each channel
            unit: unit of AnalogSignal can be a str or directly a Quantities
            nbchannel : number of channel
            bytesoffset : nb of bytes offset at the start of file

            dtype : dtype of the data
            rangemin , rangemax : if the dtype is integer, range can give in volt the min and the max of the range
        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        dtype = np.dtype(dtype)

        if isinstance(sampling_rate, (float, int)):
            # not a Quantity: assume Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (float, int)):
            # not a Quantity: assume seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        if not lazy:
            sig = np.memmap(self.filename, dtype=dtype, mode='r',
                            offset=bytesoffset)
            if sig.size % nbchannel != 0:
                # drop trailing samples that do not fill a complete frame
                sig = sig[:-(sig.size % nbchannel)]
            sig = sig.reshape((sig.size // nbchannel, nbchannel))
            if dtype.kind == 'i':
                sig = sig.astype('f')
                sig /= 2 ** (8 * dtype.itemsize)
                sig *= (rangemax - rangemin)
                sig += (rangemax + rangemin) / 2.
            elif dtype.kind == 'u':
                sig = sig.astype('f')
                sig /= 2 ** (8 * dtype.itemsize)
                sig *= (rangemax - rangemin)
                sig += rangemin
            sig_with_units = pq.Quantity(sig, units=unit, copy=False)
        
        for i in range(nbchannel):
            if lazy:
                signal = [] * unit
            else:
                signal = sig_with_units[:, i]

            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                  t_start=t_start, channel_index=i, copy=False)

            if lazy:
                # TODO
                anaSig.lazy_shape = None
            seg.analogsignals.append(anaSig)

        seg.create_many_to_one_relationship()
        return seg
Example No. 52
    def read_segment(self, import_neuroshare_segment=True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: whether to import neuroshare "segment"
                entities as SpikeTrains with associated waveforms, or to skip
                them entirely.

        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        # note: 'darwin' (macOS) is not handled here
        neuroshare = DllWithError(neuroshare)

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.' +
                     str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  # TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8 bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16 bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32 bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                      sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                                      t_start=pdTime.value * pq.s,
                                      name=str(entityInfo.szEntityLabel))
                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    # 'times' is undefined in the lazy branch, so build the
                    # placeholder SpikeTrain from an empty Quantity
                    sptr = SpikeTrain([] * pq.s,
                                      name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(
                        times=pq.Quantity(times, units='s', copy=False),
                        t_stop=times.max(),
                        waveforms=pq.Quantity(waveforms,
                                              units=str(pdwSegmentInfo.szUnits),
                                              copy=False),
                        left_sweep=nsample / 2. / float(pdwSegmentInfo.dSampleRate) * pq.s,
                        sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                        name=str(entityInfo.szEntityLabel))
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop=t_stop,
                                  name=str(entityInfo.szEntityLabel))
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
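The calls above repeatedly hand NumPy-owned memory to C functions expecting a double*. A self-contained sketch of that pattern (no neuroshare DLL involved; the write goes through the ctypes pointer directly):

import ctypes

import numpy as np

buf = np.zeros(8, dtype='float64')
ptr = buf.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
ptr[0] = 3.14          # a C callee writing through this pointer...
assert buf[0] == 3.14  # ...mutates the NumPy array in place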
Example No. 53
    def read_segment(
        self,
        lazy=False,
        # all following arguments are decided by this IO and are free
        t_start=0.,
        segment_duration=0.,
    ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """
        assert not lazy, 'Do not support lazy'

        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])
        # if the time sum of start point and segment duration is bigger than
        # the file time span, cap it at the end
        if segment_duration + t_start > float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"]) - t_start

        # create an empty segment
        seg = Segment(name="segment from the NeuroshareapiIO")

        # read nested analogsignals

        if self.metadata["num_analogs"] == 0:
            print("no analog signals in this file!")
        else:
            # run through the number of analog channels found in __init__
            for i in range(self.metadata["num_analogs"]):
                # create an analog signal object for each channel found
                ana = self.read_analogsignal(
                    channel_index=self.metadata["elecChanId"][i],
                    segment_duration=segment_duration,
                    t_start=t_start)
                # add analog signal read to segment object
                seg.analogsignals += [ana]

        # read triggers (in this case without any duration)
        for i in range(self.metadata["num_trigs"]):
            # create event object for each trigger/bit found
            eva = self.read_event(
                channel_index=self.metadata["triggersId"][i],
                segment_duration=segment_duration,
                t_start=t_start,
            )
            # add event object to segment
            seg.events += [eva]
        # read epochs (digital events with duration)
        for i in range(self.metadata["num_digiEpochs"]):
            # create an epoch object for each digital epoch found
            epa = self.read_epoch(
                channel_index=self.metadata["digiEpochId"][i],
                segment_duration=segment_duration,
                t_start=t_start,
            )
            # add epoch object to segment
            seg.epochs += [epa]
        # read nested spiketrain
        # run through all spike channels found
        for i in range(self.metadata["num_spkChans"]):
            # create spike object
            sptr = self.read_spiketrain(
                channel_index=self.metadata["spkChanId"][i],
                segment_duration=segment_duration,
                t_start=t_start)
            # add the spike object to segment
            seg.spiketrains += [sptr]

        seg.create_many_to_one_relationship()

        return seg
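A worked example of the duration-capping logic at the top of this method (values invented: assume the file's TimeSpan metadata is 120 s):

time_span = 120.0                 # stand-in for float(self.metadata["TimeSpan"])
t_start, segment_duration = 100.0, 0.0
if segment_duration == 0.:
    segment_duration = time_span  # 0 means "use the complete file"
segment_duration = min(segment_duration, time_span)
if segment_duration + t_start > time_span:
    segment_duration = time_span - t_start  # capped at 20.0 s here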