def test__children(self):
        """Check the parent-relationship metadata of an AnalogSignal after
        attaching it to both a Segment and a RecordingChannel."""
        signal = self.signals[0]

        # Attach the signal to a Segment and build the parent links.
        segment = Segment(name='seg1')
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        # Attach the same signal to a RecordingChannel as well.
        rchan = RecordingChannel(name='rchan1')
        rchan.analogsignals = [signal]
        rchan.create_many_to_one_relationship()

        # Class-level descriptions of the possible parent types.
        self.assertEqual(signal._single_parent_objects,
                         ('Segment', 'RecordingChannel'))
        self.assertEqual(signal._multi_parent_objects, ())

        # Attribute names used on the parent containers.
        self.assertEqual(signal._single_parent_containers,
                         ('segment', 'recordingchannel'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects,
                         ('Segment', 'RecordingChannel'))
        self.assertEqual(signal._parent_containers,
                         ('segment', 'recordingchannel'))

        # Both containers created above must now show up as parents.
        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'rchan1')

        assert_neo_object_is_compliant(signal)
    def test__children(self):
        """Check the parent-relationship metadata of an AnalogSignal after
        attaching it to both a Segment and a RecordingChannel."""
        signal = self.signals[0]

        # Attach the signal to one container of each parent type, creating
        # the many-to-one links as we go.
        for parent in (Segment(name='seg1'), RecordingChannel(name='rchan1')):
            parent.analogsignals = [signal]
            parent.create_many_to_one_relationship()

        expected_objects = ('Segment', 'RecordingChannel')
        expected_containers = ('segment', 'recordingchannel')

        # Class-level descriptions of the possible parent types.
        self.assertEqual(signal._single_parent_objects, expected_objects)
        self.assertEqual(signal._multi_parent_objects, ())

        # Attribute names used on the parent containers.
        self.assertEqual(signal._single_parent_containers,
                         expected_containers)
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects, expected_objects)
        self.assertEqual(signal._parent_containers, expected_containers)

        # Both containers created above must now show up as parents.
        self.assertEqual(len(signal.parents), 2)
        for parent, expected_name in zip(signal.parents, ('seg1', 'rchan1')):
            self.assertEqual(parent.name, expected_name)

        assert_neo_object_is_compliant(signal)
# ---- Example 3 ----
    def read_block(self, lazy=False, cascade=True):
        """Read the file into a single Block.

        Parameters
        ----------
        lazy : bool
            Forwarded to ``read_analogsignal``; when True, signal data are
            not loaded.
        cascade : bool
            When False, return an empty Block without reading any Segment.
        """
        block = Block()
        if not cascade:
            block.create_many_to_one_relationship()
            return block

        segment = Segment(file_origin=self._absolute_filename)

        block.channel_indexes = self._channel_indexes
        block.segments.append(segment)

        segment.analogsignals = self.read_analogsignal(lazy=lazy,
                                                       cascade=cascade)
        # Tracking data may be absent or unreadable; treat that as non-fatal.
        try:
            segment.irregularlysampledsignals = self.read_tracking()
        except Exception as e:
            print('Warning: unable to read tracking')
            print(e)
        segment.spiketrains = self.read_spiketrain()

        # TODO Call all other read functions

        segment.duration = self._duration

        # TODO May need to "populate_RecordingChannel"

        block.create_many_to_one_relationship()
        return block
# ---- Example 4 ----
def prune_segment(segment: "Segment", key: str = "type_id") -> None:
    """Remove every child of *segment* whose annotations lack *key*.

    Filters the ``analogsignals``, ``epochs``, ``events``,
    ``irregularlysampledsignals``, ``spiketrains`` and ``imagesequences``
    containers in place, keeping only children that carry the annotation
    *key* (default ``"type_id"``, preserving the original behaviour).

    Parameters
    ----------
    segment : Segment
        The segment whose child containers are filtered in place.
    key : str
        The annotation key a child must have in order to be kept.
    """
    containers = (
        "analogsignals",
        "epochs",
        "events",
        "irregularlysampledsignals",
        "spiketrains",
        "imagesequences",
    )
    for container in containers:
        kept = [child for child in getattr(segment, container)
                if key in child.annotations]
        setattr(segment, container, kept)
    def test__children(self):
        """Check the parent-relationship metadata of an AnalogSignal after
        attaching it to both a Segment and a ChannelIndex."""
        signal = self.signals[0]

        # Attach the signal to a Segment and build the parent links.
        segment = Segment(name='seg1')
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        # Attach the same signal to a ChannelIndex covering all its channels.
        chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
        chx.analogsignals = [signal]
        chx.create_many_to_one_relationship()

        # Class-level descriptions of the possible parent types.
        self.assertEqual(signal._single_parent_objects, ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._multi_parent_objects, ())

        # Attribute names used on the parent containers.
        self.assertEqual(signal._single_parent_containers, ('segment', 'channel_index'))
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects, ('Segment', 'ChannelIndex'))
        self.assertEqual(signal._parent_containers, ('segment', 'channel_index'))

        # Both containers created above must now show up as parents.
        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, 'seg1')
        self.assertEqual(signal.parents[1].name, 'chx1')

        assert_neo_object_is_compliant(signal)
    def test__children(self):
        """Check the parent-relationship metadata of an AnalogSignal after
        attaching it to both a Segment and a ChannelIndex."""
        signal = self.signals[0]

        # Attach the signal to one container of each parent type, creating
        # the many-to-one links as we go.
        segment = Segment(name='seg1')
        chx = ChannelIndex(name='chx1', index=np.arange(signal.shape[1]))
        for parent in (segment, chx):
            parent.analogsignals = [signal]
            parent.create_many_to_one_relationship()

        expected_objects = ('Segment', 'ChannelIndex')
        expected_containers = ('segment', 'channel_index')

        # Class-level descriptions of the possible parent types.
        self.assertEqual(signal._single_parent_objects, expected_objects)
        self.assertEqual(signal._multi_parent_objects, ())

        # Attribute names used on the parent containers.
        self.assertEqual(signal._single_parent_containers,
                         expected_containers)
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._parent_objects, expected_objects)
        self.assertEqual(signal._parent_containers, expected_containers)

        # Both containers created above must now show up as parents.
        self.assertEqual(len(signal.parents), 2)
        for parent, expected_name in zip(signal.parents, ('seg1', 'chx1')):
            self.assertEqual(parent.name, expected_name)

        assert_neo_object_is_compliant(signal)
# ---- Example 7 ----
    def _read_segment(self, fobject):
        '''
        Read a single segment with a single analogsignal from the open
        binary file object ``fobject``.

        Returns the Segment, or False if there are no more segments in
        the file.
        '''

        try:
            # float64 -- start time of the AnalogSignal (in days)
            t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
        except IndexError:
            # np.fromfile returned an empty array: no more Segments, return
            return False

        # int16 -- index of the stimulus parameters
        seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()

        # int16 -- number of stimulus parameters
        numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]

        # read the name strings for the stimulus parameters
        paramnames = []
        for _ in range(numelements):
            # uint8 -- the number of characters in the string
            numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]

            # char * numchars -- a single name string
            name = np.fromfile(fobject, dtype=np.uint8, count=numchars)

            # exclude invalid (control) characters, then decode the raw
            # bytes.  The old str(...view('c').tostring()) idiom produced
            # "b'...'"-wrapped names on Python 3 and tostring() has been
            # removed from recent numpy, so decode the bytes directly.
            name = name[name >= 32].tobytes().decode('latin-1')

            # add the name to the list of names
            paramnames.append(name)

        # float32 * numelements -- the values for the stimulus parameters
        paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)

        # combine parameter names and the parameters as a dict
        params = dict(zip(paramnames, paramvalues))

        # int32 -- the number of elements in the AnalogSignal
        numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]

        # int16 * numpts -- the AnalogSignal itself
        signal = np.fromfile(fobject, dtype=np.int16, count=numpts)

        # np.float was removed in numpy 1.24; the builtin float dtype is
        # equivalent (float64)
        sig = AnalogSignal(signal.astype(float) * pq.mV,
                           t_start=t_start * pq.d,
                           file_origin=self._filename,
                           sampling_period=1. * pq.s,
                           copy=False)
        # Note: setting the sampling_period to 1 s is arbitrary

        # load the AnalogSignal and parameters into a new Segment
        seg = Segment(file_origin=self._filename, index=seg_index, **params)
        seg.analogsignals = [sig]

        return seg
def proc_dam(filename):
    '''Load an dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    # NOTE(review): `.items()[0]` subscripts a dict view — this only works on
    # Python 2; on Python 3 it needs list(damobj.items())[0].  Confirm the
    # intended interpreter for this legacy helper.
    with np.load(filename) as damobj:
        damfile = damobj.items()[0][1].flatten()

    # strip the '_dam_py?.npz' suffix (12 chars) and restore the '.dam' name
    filename = os.path.basename(filename[:-12]+'.dam')

    # each entry of the structured array holds one record per stimulus
    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    # single recording channel grouped under one RecordingChannelGroup
    # (pre-ChannelIndex neo API)
    rcg = RecordingChannelGroup(file_origin=filename)
    chan = RecordingChannel(file_origin=filename, index=0, name='Chan1')
    rcg.channel_indexes = np.array([1])
    rcg.channel_names = np.array(['Chan1'], dtype='S')

    block.recordingchannelgroups.append(rcg)
    rcg.recordingchannels.append(chan)

    # unpack the per-stimulus parameter names and values into dicts
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    # one Segment per stimulus presentation, each with one AnalogSignal
    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal*pq.mV,
                           t_start=timestamp*pq.d,
                           file_origin=filename,
                           sampling_period=1.*pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    # NOTE(review): free-function form is the old neo API; newer neo exposes
    # this as a Block method — confirm which neo version this targets.
    create_many_to_one_relationship(block)

    return block
def proc_dam(filename):
    '''Load an dam file that has already been processed by the official matlab
    file converter.  That matlab data is saved to an m-file, which is then
    converted to a numpy '.npz' file.  This numpy file is the file actually
    loaded.  This function converts it to a neo block and returns the block.
    This block can be compared to the block produced by BrainwareDamIO to
    make sure BrainwareDamIO is working properly

    block = proc_dam(filename)

    filename: The file name of the numpy file to load.  It should end with
    '*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
    with the value '*.dam', so the filename to compare should fit that pattern.
    'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
    for the python 3 version of the numpy file.

    example: filename = 'file1_dam_py2.npz'
             dam file name = 'file1.dam'
    '''
    with np.load(filename) as damobj:
        # NpzFile.items() returns a dict view on Python 3, which is not
        # subscriptable -- materialize it before taking the first entry.
        damfile = list(damobj.items())[0][1].flatten()

    # strip the '_dam_py?.npz' suffix (12 chars) and restore the '.dam' name
    filename = os.path.basename(filename[:-12] + '.dam')

    # each entry of the structured array holds one record per stimulus
    signals = [res.flatten() for res in damfile['signal']]
    stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
    timestamps = [res[0, 0] for res in damfile['timestamp']]

    block = Block(file_origin=filename)

    # a single recording channel described by one ChannelIndex
    chx = ChannelIndex(file_origin=filename,
                       index=np.array([0]),
                       channel_ids=np.array([1]),
                       channel_names=np.array(['Chan1'], dtype='S'))

    block.channel_indexes.append(chx)

    # unpack the per-stimulus parameter names and values into dicts
    params = [res['params'][0, 0].flatten() for res in damfile['stim']]
    values = [res['values'][0, 0].flatten() for res in damfile['stim']]
    params = [[res1[0] for res1 in res] for res in params]
    values = [[res1 for res1 in res] for res in values]
    stims = [dict(zip(param, value)) for param, value in zip(params, values)]

    # one Segment per stimulus presentation, each with one AnalogSignal
    fulldam = zip(stimIndexes, timestamps, signals, stims)
    for stimIndex, timestamp, signal, stim in fulldam:
        sig = AnalogSignal(signal=signal * pq.mV,
                           t_start=timestamp * pq.d,
                           file_origin=filename,
                           sampling_period=1. * pq.s)
        segment = Segment(file_origin=filename,
                          index=stimIndex,
                          **stim)
        segment.analogsignals = [sig]
        block.segments.append(segment)

    block.create_many_to_one_relationship()

    return block
# ---- Example 10 ----
    def _read_segment(self, node, parent):
        """Build a Segment from the given file node.

        Reads every child data object (signals, epochs, events, spike
        trains) from the node's sub-containers, attaches them to a new
        Segment, links the Segment to *parent* and returns it.
        """
        attributes = self._get_standard_attributes(node)
        segment = Segment(**attributes)

        signals = []
        for name, child_node in node['analogsignals'].items():
            if "AnalogSignal" in name:
                signals.append(self._read_analogsignal(child_node, parent=segment))
        if signals and self.merge_singles:
            segment.unmerged_analogsignals = signals  # signals will be merged later
            signals = []
        # AnalogSignalArrays are kept separate from single AnalogSignals
        for name, child_node in node['analogsignalarrays'].items():
            if "AnalogSignalArray" in name:
                signals.append(self._read_analogsignalarray(child_node, parent=segment))
        segment.analogsignals = signals

        irr_signals = []
        for name, child_node in node['irregularlysampledsignals'].items():
            if "IrregularlySampledSignal" in name:
                irr_signals.append(self._read_irregularlysampledsignal(child_node, parent=segment))
        if irr_signals and self.merge_singles:
            # deferred merge, mirroring the analogsignal handling above
            segment.unmerged_irregularlysampledsignals = irr_signals
            irr_signals = []
        segment.irregularlysampledsignals = irr_signals

        epochs = []
        for name, child_node in node['epochs'].items():
            if "Epoch" in name:
                epochs.append(self._read_epoch(child_node, parent=segment))
        if self.merge_singles:
            # epochs are merged eagerly, unlike the signals above
            epochs = self._merge_data_objects(epochs)
        for name, child_node in node['epocharrays'].items():
            if "EpochArray" in name:
                epochs.append(self._read_epocharray(child_node, parent=segment))
        segment.epochs = epochs

        events = []
        for name, child_node in node['events'].items():
            if "Event" in name:
                events.append(self._read_event(child_node, parent=segment))
        if self.merge_singles:
            events = self._merge_data_objects(events)
        for name, child_node in node['eventarrays'].items():
            if "EventArray" in name:
                events.append(self._read_eventarray(child_node, parent=segment))
        segment.events = events

        spiketrains = []
        # any entry in 'spikes' is unsupported; fail fast on the first one
        for name, child_node in node['spikes'].items():
            raise NotImplementedError('Spike objects not yet handled.')
        for name, child_node in node['spiketrains'].items():
            if "SpikeTrain" in name:
                spiketrains.append(self._read_spiketrain(child_node, parent=segment))
        segment.spiketrains = spiketrains

        segment.block = parent
        return segment
# ---- Example 11 ----
    def _read_segment(self, node, parent):
        """Build a Segment from the given file node.

        Reads every child data object (signals, epochs, events, spike
        trains) from the node's sub-containers, attaches them to a new
        Segment, links the Segment to *parent* and returns it.
        """
        segment = Segment(**self._get_standard_attributes(node))

        def children_of(container, type_name, reader):
            # Read every child in node[container] whose name matches.
            return [reader(child_node, parent=segment)
                    for child_name, child_node in node[container].items()
                    if type_name in child_name]

        signals = children_of('analogsignals', 'AnalogSignal',
                              self._read_analogsignal)
        if signals and self.merge_singles:
            segment.unmerged_analogsignals = signals  # signals will be merged later
            signals = []
        signals += children_of('analogsignalarrays', 'AnalogSignalArray',
                               self._read_analogsignalarray)
        segment.analogsignals = signals

        irr_signals = children_of('irregularlysampledsignals',
                                  'IrregularlySampledSignal',
                                  self._read_irregularlysampledsignal)
        if irr_signals and self.merge_singles:
            segment.unmerged_irregularlysampledsignals = irr_signals
            irr_signals = []
        segment.irregularlysampledsignals = irr_signals

        epochs = children_of('epochs', 'Epoch', self._read_epoch)
        if self.merge_singles:
            epochs = self._merge_data_objects(epochs)
        epochs.extend(children_of('epocharrays', 'EpochArray',
                                  self._read_epocharray))
        segment.epochs = epochs

        events = children_of('events', 'Event', self._read_event)
        if self.merge_singles:
            events = self._merge_data_objects(events)
        events.extend(children_of('eventarrays', 'EventArray',
                                  self._read_eventarray))
        segment.events = events

        # any entry in 'spikes' is unsupported; fail fast on the first one
        for _ in node['spikes'].items():
            raise NotImplementedError('Spike objects not yet handled.')
        segment.spiketrains = children_of('spiketrains', 'SpikeTrain',
                                          self._read_spiketrain)

        segment.block = parent
        return segment
    def test__children(self):
        """Exhaustively check all child- and parent-relationship metadata of
        an AnalogSignal attached to a Segment and a RecordingChannel."""
        signal = self.signals[0]

        # Attach the signal to a Segment and build the parent links.
        segment = Segment(name="seg1")
        segment.analogsignals = [signal]
        segment.create_many_to_one_relationship()

        # Attach the same signal to a RecordingChannel as well.
        rchan = RecordingChannel(name="rchan1")
        rchan.analogsignals = [signal]
        rchan.create_many_to_one_relationship()

        # An AnalogSignal is a leaf object: it has no children of any kind.
        self.assertEqual(signal._container_child_objects, ())
        self.assertEqual(signal._data_child_objects, ())
        self.assertEqual(signal._single_parent_objects, ("Segment", "RecordingChannel"))
        self.assertEqual(signal._multi_child_objects, ())
        self.assertEqual(signal._multi_parent_objects, ())
        self.assertEqual(signal._child_properties, ())

        self.assertEqual(signal._single_child_objects, ())

        # Container attribute names follow the same pattern as the objects.
        self.assertEqual(signal._container_child_containers, ())
        self.assertEqual(signal._data_child_containers, ())
        self.assertEqual(signal._single_child_containers, ())
        self.assertEqual(signal._single_parent_containers, ("segment", "recordingchannel"))
        self.assertEqual(signal._multi_child_containers, ())
        self.assertEqual(signal._multi_parent_containers, ())

        self.assertEqual(signal._child_objects, ())
        self.assertEqual(signal._child_containers, ())
        self.assertEqual(signal._parent_objects, ("Segment", "RecordingChannel"))
        self.assertEqual(signal._parent_containers, ("segment", "recordingchannel"))

        # Both containers created above must now show up as parents.
        self.assertEqual(signal.children, ())
        self.assertEqual(len(signal.parents), 2)
        self.assertEqual(signal.parents[0].name, "seg1")
        self.assertEqual(signal.parents[1].name, "rchan1")

        # Relationship creation on a childless object must be a no-op.
        signal.create_many_to_one_relationship()
        signal.create_many_to_many_relationship()
        signal.create_relationship()
        assert_neo_object_is_compliant(signal)
# ---- Example 13 ----
    def test__cut_block_by_epochs(self):
        """Cut a Block into per-epoch Segments, with and without resetting
        the time axis, and verify the sliced children of each new Segment."""
        # Epoch selected for cutting (matches properties={'pick': 'me'}).
        epoch = Epoch([0.5, 10.0, 25.2] * pq.s,
                      durations=[5.1, 4.8, 5.0] * pq.s,
                      t_start=.1 * pq.s)
        epoch.annotate(epoch_type='a', pick='me')
        epoch.array_annotate(trial_id=[1, 2, 3])

        # Second epoch that must NOT drive the cutting.
        epoch2 = Epoch([0.6, 9.5, 16.8, 34.1] * pq.s,
                       durations=[4.5, 4.8, 5.0, 5.0] * pq.s,
                       t_start=.1 * pq.s)
        epoch2.annotate(epoch_type='b')
        epoch2.array_annotate(trial_id=[1, 2, 3, 4])

        event = Event(times=[0.5, 10.0, 25.2] * pq.s, t_start=.1 * pq.s)
        event.annotate(event_type='trial start')
        event.array_annotate(trial_id=[1, 2, 3])

        # One of each data type so every container is exercised.
        anasig = AnalogSignal(np.arange(50.0) * pq.mV,
                              t_start=.1 * pq.s,
                              sampling_rate=1.0 * pq.Hz)
        irrsig = IrregularlySampledSignal(signal=np.arange(50.0) * pq.mV,
                                          times=anasig.times,
                                          t_start=.1 * pq.s)
        st = SpikeTrain(
            np.arange(0.5, 50, 7) * pq.s,
            t_start=.1 * pq.s,
            t_stop=50.0 * pq.s,
            waveforms=np.array(
                [[[0., 1.], [0.1, 1.1]], [[2., 3.], [2.1, 3.1]],
                 [[4., 5.], [4.1, 5.1]], [[6., 7.], [6.1, 7.1]],
                 [[8., 9.], [8.1, 9.1]], [[12., 13.], [12.1, 13.1]],
                 [[14., 15.], [14.1, 15.1]], [[16., 17.], [16.1, 17.1]]]) *
            pq.mV,
            array_annotations={'spikenum': np.arange(1, 9)})

        # seg is cut; seg2 carries no matching epoch and stays untouched.
        seg = Segment()
        seg2 = Segment(name='NoCut')
        seg.epochs = [epoch, epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        block = Block()
        block.segments = [seg, seg2]
        block.create_many_to_one_relationship()

        # test without resetting the time
        cut_block_by_epochs(block, properties={'pick': 'me'})

        assert_neo_object_is_compliant(block)
        # one new Segment per epoch time in `epoch` (seg2 was dropped)
        self.assertEqual(len(block.segments), 3)

        for epoch_idx in range(len(epoch)):
            self.assertEqual(len(block.segments[epoch_idx].events), 1)
            self.assertEqual(len(block.segments[epoch_idx].spiketrains), 1)
            self.assertEqual(len(block.segments[epoch_idx].analogsignals), 1)
            self.assertEqual(
                len(block.segments[epoch_idx].irregularlysampledsignals), 1)

            # only the first cut window also overlaps epoch2
            if epoch_idx != 0:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 1)
            else:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 2)

            # each child must equal the corresponding time_slice of the
            # original object over [t_i, t_i + duration_i]
            assert_same_attributes(
                block.segments[epoch_idx].spiketrains[0],
                st.time_slice(t_start=epoch.times[epoch_idx],
                              t_stop=epoch.times[epoch_idx] +
                              epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].analogsignals[0],
                anasig.time_slice(t_start=epoch.times[epoch_idx],
                                  t_stop=epoch.times[epoch_idx] +
                                  epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].irregularlysampledsignals[0],
                irrsig.time_slice(t_start=epoch.times[epoch_idx],
                                  t_stop=epoch.times[epoch_idx] +
                                  epoch.durations[epoch_idx]))
            assert_same_attributes(
                block.segments[epoch_idx].events[0],
                event.time_slice(t_start=epoch.times[epoch_idx],
                                 t_stop=epoch.times[epoch_idx] +
                                 epoch.durations[epoch_idx]))
        assert_same_attributes(
            block.segments[0].epochs[0],
            epoch.time_slice(t_start=epoch.times[0],
                             t_stop=epoch.times[0] + epoch.durations[0]))
        assert_same_attributes(
            block.segments[0].epochs[1],
            epoch2.time_slice(t_start=epoch.times[0],
                              t_stop=epoch.times[0] + epoch.durations[0]))

        # rebuild fresh containers for the second run
        seg = Segment()
        seg2 = Segment(name='NoCut')
        seg.epochs = [epoch, epoch2]
        seg.events = [event]
        seg.analogsignals = [anasig]
        seg.irregularlysampledsignals = [irrsig]
        seg.spiketrains = [st]

        block = Block()
        block.segments = [seg, seg2]
        block.create_many_to_one_relationship()

        # test with resetting the time
        cut_block_by_epochs(block, properties={'pick': 'me'}, reset_time=True)

        assert_neo_object_is_compliant(block)
        self.assertEqual(len(block.segments), 3)

        for epoch_idx in range(len(epoch)):
            self.assertEqual(len(block.segments[epoch_idx].events), 1)
            self.assertEqual(len(block.segments[epoch_idx].spiketrains), 1)
            self.assertEqual(len(block.segments[epoch_idx].analogsignals), 1)
            self.assertEqual(
                len(block.segments[epoch_idx].irregularlysampledsignals), 1)
            if epoch_idx != 0:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 1)
            else:
                self.assertEqual(len(block.segments[epoch_idx].epochs), 2)

            # with reset_time each child is shifted so the window starts at 0
            assert_same_attributes(
                block.segments[epoch_idx].spiketrains[0],
                st.time_shift(-epoch.times[epoch_idx]).time_slice(
                    t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx]))

            anasig_target = anasig.time_shift(-epoch.times[epoch_idx])
            anasig_target = anasig_target.time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx])
            assert_same_attributes(block.segments[epoch_idx].analogsignals[0],
                                   anasig_target)
            irrsig_target = irrsig.time_shift(-epoch.times[epoch_idx])
            irrsig_target = irrsig_target.time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx])
            assert_same_attributes(
                block.segments[epoch_idx].irregularlysampledsignals[0],
                irrsig_target)
            assert_same_attributes(
                block.segments[epoch_idx].events[0],
                event.time_shift(-epoch.times[epoch_idx]).time_slice(
                    t_start=0 * pq.s, t_stop=epoch.durations[epoch_idx]))

        assert_same_attributes(
            block.segments[0].epochs[0],
            epoch.time_shift(-epoch.times[0]).time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[0]))
        assert_same_attributes(
            block.segments[0].epochs[1],
            epoch2.time_shift(-epoch.times[0]).time_slice(
                t_start=0 * pq.s, t_stop=epoch.durations[0]))
# ---- Example 14 ----
    def read_segment(self,
                     gid_list=None,
                     time_unit=pq.ms,
                     t_start=None,
                     t_stop=None,
                     sampling_period=None,
                     id_column_dat=0,
                     time_column_dat=1,
                     value_columns_dat=2,
                     id_column_gdf=0,
                     time_column_gdf=1,
                     value_types=None,
                     value_units=None,
                     lazy=False,
                     cascade=True):
        """
        Reads a Segment which contains SpikeTrain(s) with specified neuron IDs
        from the GDF data.

        Arguments
        ----------
        gid_list : list, default: None
            A list of GDF IDs of which to return SpikeTrain(s). gid_list must
            be specified if the GDF file contains neuron IDs, the default None
            then raises an error. Specify an empty list [] to retrieve the spike
            trains of all neurons.
        time_unit : Quantity (time), optional, default: quantities.ms
            The time unit of recorded time stamps in DAT as well as GDF files.
        t_start : Quantity (time), optional, default: 0 * pq.ms
            Start time of SpikeTrain.
        t_stop : Quantity (time), default: None
            Stop time of SpikeTrain. t_stop must be specified, the default None
            raises an error.
        sampling_period : Quantity (frequency), optional, default: None
            Sampling period of the recorded data.
        id_column_dat : int, optional, default: 0
            Column index of neuron IDs in the DAT file.
        time_column_dat : int, optional, default: 1
            Column index of time stamps in the DAT file.
        value_columns_dat : int, optional, default: 2
            Column index of the analog values recorded in the DAT file.
        id_column_gdf : int, optional, default: 0
            Column index of neuron IDs in the GDF file.
        time_column_gdf : int, optional, default: 1
            Column index of time stamps in the GDF file.
        value_types : str, optional, default: None
            Nest data type of the analog values recorded, eg.'V_m', 'I', 'g_e'
        value_units : Quantity (amplitude), default: None
            The physical unit of the recorded signal values.
        lazy : bool, optional, default: False
        cascade : bool, optional, default: True

        Returns
        -------
        seg : Segment
            The Segment contains one SpikeTrain and one AnalogSignal for
            each ID in gid_list.
        """
        # A (first, last) tuple is expanded to the inclusive ID range.
        if isinstance(gid_list, tuple):
            if gid_list[0] > gid_list[1]:
                raise ValueError('The second entry in gid_list must be '
                                 'greater or equal to the first entry.')
            gid_list = range(gid_list[0], gid_list[1] + 1)

        # __read_xxx() needs a list of IDs
        if gid_list is None:
            gid_list = [None]

        # create an empty Segment
        seg = Segment(file_origin=",".join(self.filenames))
        seg.file_datetime = datetime.fromtimestamp(
            os.stat(self.filenames[0]).st_mtime)
        # todo: rather than take the first file for the timestamp, we should take the oldest
        #       in practice, there won't be much difference

        if cascade:
            # Load analogsignals and attach to Segment
            if 'dat' in self.avail_formats:
                seg.analogsignals = self.__read_analogsignals(
                    gid_list,
                    time_unit,
                    t_start,
                    t_stop,
                    sampling_period=sampling_period,
                    id_column=id_column_dat,
                    time_column=time_column_dat,
                    value_columns=value_columns_dat,
                    value_types=value_types,
                    value_units=value_units,
                    lazy=lazy)
            # Load spike trains from the GDF file, if one is available
            if 'gdf' in self.avail_formats:
                seg.spiketrains = self.__read_spiketrains(
                    gid_list,
                    time_unit,
                    t_start,
                    t_stop,
                    id_column=id_column_gdf,
                    time_column=time_column_gdf)

        return seg
# ---- Example 15 ----
    def _read_segment(self, fobject, lazy):
        """
        Read a single segment with a single analogsignal

        Parameters
        ----------
        fobject : file
            Open binary file object positioned at the start of a segment
            record.
        lazy : bool
            If True, return an AnalogSignal with an empty data array and a
            ``lazy_shape`` attribute instead of loading the samples.

        Returns
        -------
        Segment or False
            The segment read from the file, or False if there are no more
            segments.
        """

        try:
            # float64 -- start time of the AnalogSignal
            t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
        except IndexError:
            # if there are no more Segments, return
            return False

        # int16 -- index of the stimulus parameters
        seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()

        # int16 -- number of stimulus parameters
        numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]

        # read the name strings for the stimulus parameters
        paramnames = []
        for _ in range(numelements):
            # uint8 -- the number of characters in the string
            numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]

            # char * numchars -- a single name string
            name = np.fromfile(fobject, dtype=np.uint8, count=numchars)

            # exclude non-printable characters, then decode to str.
            # tobytes().decode() replaces the deprecated
            # view("c").tostring() and avoids str() on a bytes object,
            # which on Python 3 would yield keys like "b'name'".
            # latin-1 maps every byte value, so decoding never raises.
            name = name[name >= 32].tobytes().decode('latin-1')

            # add the name to the list of names
            paramnames.append(name)

        # float32 * numelements -- the values for the stimulus parameters
        paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)

        # combine parameter names and the parameters as a dict
        params = dict(zip(paramnames, paramvalues))

        # int32 -- the number elements in the AnalogSignal
        numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]

        # int16 * numpts -- the AnalogSignal itself
        signal = np.fromfile(fobject, dtype=np.int16, count=numpts)

        # handle lazy loading
        if lazy:
            sig = AnalogSignal(
                [],
                t_start=t_start * pq.d,
                file_origin=self._filename,
                sampling_period=1.0 * pq.s,
                units=pq.mV,
                # np.float was removed in numpy >= 1.24; the builtin float
                # (== np.float64) is the documented replacement
                dtype=float,
            )
            sig.lazy_shape = len(signal)
        else:
            sig = AnalogSignal(
                # astype(float) instead of the removed np.float alias
                signal.astype(float) * pq.mV,
                t_start=t_start * pq.d,
                file_origin=self._filename,
                sampling_period=1.0 * pq.s,
                copy=False,
            )
        # Note: setting the sampling_period to 1 s is arbitrary

        # load the AnalogSignal and parameters into a new Segment
        seg = Segment(file_origin=self._filename, index=seg_index, **params)
        seg.analogsignals = [sig]

        return seg
示例#16
0
    def read_segment(self, gid_list=None, time_unit=pq.ms, t_start=None,
                     t_stop=None, sampling_period=None, id_column_dat=0,
                     time_column_dat=1, value_columns_dat=2,
                     id_column_gdf=0, time_column_gdf=1, value_types=None,
                     value_units=None, lazy=False):
        """
        Reads a Segment which contains SpikeTrain(s) with specified neuron IDs
        from the GDF data.

        Arguments
        ----------
        gid_list : list, default: None
            A list of GDF IDs of which to return SpikeTrain(s). gid_list must
            be specified if the GDF file contains neuron IDs, the default None
            then raises an error. Specify an empty list [] to retrieve the spike
            trains of all neurons. A tuple ``(first, last)`` is expanded to the
            inclusive range of IDs ``first..last``.
        time_unit : Quantity (time), optional, default: quantities.ms
            The time unit of recorded time stamps in DAT as well as GDF files.
        t_start : Quantity (time), optional, default: 0 * pq.ms
            Start time of SpikeTrain.
        t_stop : Quantity (time), default: None
            Stop time of SpikeTrain. t_stop must be specified, the default None
            raises an error.
        sampling_period : Quantity (frequency), optional, default: None
            Sampling period of the recorded data.
        id_column_dat : int, optional, default: 0
            Column index of neuron IDs in the DAT file.
        time_column_dat : int, optional, default: 1
            Column index of time stamps in the DAT file.
        value_columns_dat : int, optional, default: 2
            Column index of the analog values recorded in the DAT file.
        id_column_gdf : int, optional, default: 0
            Column index of neuron IDs in the GDF file.
        time_column_gdf : int, optional, default: 1
            Column index of time stamps in the GDF file.
        value_types : str, optional, default: None
            Nest data type of the analog values recorded, eg.'V_m', 'I', 'g_e'
        value_units : Quantity (amplitude), default: None
            The physical unit of the recorded signal values.
        lazy : bool, optional, default: False
            Lazy loading is not supported; must be False.

        Returns
        -------
        seg : Segment
            The Segment contains one SpikeTrain and one AnalogSignal for
            each ID in gid_list.
        """
        assert not lazy, 'Do not support lazy'

        if isinstance(gid_list, tuple):
            if gid_list[0] > gid_list[1]:
                raise ValueError('The second entry in gid_list must be '
                                 'greater or equal to the first entry.')
            gid_list = range(gid_list[0], gid_list[1] + 1)

        # __read_xxx() needs a list of IDs
        if gid_list is None:
            gid_list = [None]

        # create an empty Segment
        seg = Segment(file_origin=",".join(self.filenames))
        # use the oldest file's modification time for the segment datetime,
        # so the timestamp does not depend on the (arbitrary) file order
        seg.file_datetime = datetime.fromtimestamp(
            min(os.stat(fname).st_mtime for fname in self.filenames))

        # Load analogsignals and attach to Segment
        if 'dat' in self.avail_formats:
            seg.analogsignals = self.__read_analogsignals(
                gid_list,
                time_unit,
                t_start,
                t_stop,
                sampling_period=sampling_period,
                id_column=id_column_dat,
                time_column=time_column_dat,
                value_columns=value_columns_dat,
                value_types=value_types,
                value_units=value_units)
        if 'gdf' in self.avail_formats:
            seg.spiketrains = self.__read_spiketrains(
                gid_list,
                time_unit,
                t_start,
                t_stop,
                id_column=id_column_gdf,
                time_column=time_column_gdf)

        return seg
示例#17
0
    def test_roundtrip_with_annotations(self):
        """Round-trip a block with NWB-specific annotations through NWBIO.

        Writes a block containing a current-clamp stimulus/response pair,
        re-reads it both with raw pynwb and with NWBIO, and checks that the
        NWB annotations were mapped to the proper pynwb types and that the
        response signal survives the round trip unchanged.
        """
        original_block = Block(name="experiment",
                               session_start_time=datetime.now())
        segment = Segment(name="session 1")
        original_block.segments.append(segment)
        segment.block = original_block

        electrode_annotations = {
            "name": "electrode #1",
            "description": "intracellular electrode",
            "device": {
                "name": "electrode #1"
            }
        }
        stimulus_annotations = {
            "nwb_group": "stimulus",
            "nwb_neurodata_type":
            ("pynwb.icephys", "CurrentClampStimulusSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0
        }
        response_annotations = {
            "nwb_group": "acquisition",
            "nwb_neurodata_type": ("pynwb.icephys", "CurrentClampSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0,
            "nwb:bias_current": 1e-12,
            "nwb:bridge_balance": 70e6,
            "nwb:capacitance_compensation": 1e-12
        }
        stimulus = AnalogSignal(np.random.randn(100, 1) * pq.nA,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="stimulus",
                                **stimulus_annotations)
        response = AnalogSignal(np.random.randn(100, 1) * pq.mV,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="response",
                                **response_annotations)
        segment.analogsignals = [stimulus, response]
        stimulus.segment = response.segment = segment

        test_file_name = "test_round_trip_with_annotations.nwb"
        iow = NWBIO(filename=test_file_name, mode='w')
        iow.write_all_blocks([original_block])

        # keep a handle on the reader so the HDF5 file can be closed after
        # the assertions (the original leaked the open file, which can make
        # the os.remove below fail on some platforms)
        read_io = pynwb.NWBHDF5IO(test_file_name, mode="r")
        nwbfile = read_io.read()

        self.assertIsInstance(nwbfile.acquisition["response"],
                              pynwb.icephys.CurrentClampSeries)
        self.assertIsInstance(nwbfile.stimulus["stimulus"],
                              pynwb.icephys.CurrentClampStimulusSeries)
        self.assertEqual(nwbfile.acquisition["response"].bridge_balance,
                         response_annotations["nwb:bridge_balance"])

        ior = NWBIO(filename=test_file_name, mode='r')
        retrieved_block = ior.read_all_blocks()[0]

        original_response = original_block.segments[0].filter(
            name="response")[0]
        retrieved_response = retrieved_block.segments[0].filter(
            name="response")[0]
        for attr_name in ("name", "units", "sampling_rate", "t_start"):
            retrieved_attribute = getattr(retrieved_response, attr_name)
            original_attribute = getattr(original_response, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(retrieved_response.magnitude,
                           original_response.magnitude)

        # release the HDF5 handle before deleting the file
        read_io.close()
        os.remove(test_file_name)