Example #1
 def setUp(self):
     self.evt = Event(times=np.arange(0, 100, 1) * pq.s,
                      name='Ch1',
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.evt2 = Event(times=np.arange(0, 100, 3) * pq.s,
                       name='Ch2',
                       labels=np.repeat(np.array(['t2', 't3'], dtype='S'),
                                        17))
     self.segment = Segment()
     self.segment.events.append(self.evt)
     self.segment.events.append(self.evt2)
     self.df = pd.DataFrame(data=[[1, 0], [1, 1]],
                            index=['start', 'stop'],
                            columns=['Ch1', 'Ch2'])
     self.startoftrial = ['start']
     self.epochs = ['results']
     self.name = 'MyEvents'
     self.typeframe = pd.DataFrame(data=['start', 'results'],
                                   columns=['type'],
                                   index=['start', 'stop'])
     ProcessEvents(seg=self.segment,
                   tolerance=1,
                   evtframe=self.df,
                   name=self.name)
     self.columns = ['time', 'event', 'trial_idx', 'results',
                     'with_previous_results', 'event_type']
Example #2
    def test__add_epoch(self):
        proxy_event = EventProxy(rawio=self.reader,
                                 event_channel_index=0,
                                 block_index=0,
                                 seg_index=0)

        loaded_event = proxy_event.load()

        regular_event = Event(times=loaded_event.times -
                              1 * loaded_event.units)

        loaded_event.annotate(nix_name='neo.event.0')
        regular_event.annotate(nix_name='neo.event.1')

        seg = Segment()
        seg.events = [regular_event, proxy_event]

        # test cutting with two events one of which is a proxy
        epoch = add_epoch(seg, regular_event, proxy_event)

        assert_neo_object_is_compliant(epoch)
        exp_annos = {
            k: v
            for k, v in regular_event.annotations.items() if k != 'nix_name'
        }
        self.assertDictEqual(epoch.annotations, exp_annos)
        assert_arrays_almost_equal(epoch.times, regular_event.times, 1e-12)
        assert_arrays_almost_equal(
            epoch.durations,
            np.ones(regular_event.shape) * loaded_event.units, 1e-12)
Example #3
    def _get_tracking(self, channel, conversion):

        if channel is not None:
            eva = Event()
            ttls = self._kwe['event_types']['TTL']['events'][
                'time_samples'].value
            event_channels = self._kwe['event_types']['TTL']['events'][
                'user_data']['event_channels'].value
            event_id = self._kwe['event_types']['TTL']['events']['user_data'][
                'eventID'].value
            eva.times = (ttls[(event_channels == channel) & (event_id == 1)] /
                         self._attrs['kwe']['sample_rate']) * pq.s
            eva.name = 'TrackingTTL'

        posdata = self._kwe['event_types']['Binary_messages']['events'][
            'user_data']['Data'].value
        node_id = self._kwe['event_types']['Binary_messages']['events'][
            'user_data']['nodeID'].value
        time_samples = self._kwe['event_types']['Binary_messages']['events'][
            'time_samples'].value
        sigs = []
        for node in self._nodes['OSC Port']:
            irsig = IrregularlySampledSignal(
                signal=posdata[node_id == int(node['NodeId'])] * conversion *
                pq.m,
                times=(time_samples[node_id == int(node['NodeId'])] /
                       self._attrs['kwe']['sample_rate']) * pq.s,
                name=node['address'])
            sigs += [irsig]
        if channel is not None:
            return eva, sigs
        else:
            return sigs
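The TTL selection above combines two boolean masks before converting sample indices to seconds. A minimal standalone sketch of that step, with illustrative values:

import numpy as np
import quantities as pq

ttls = np.array([10, 20, 30, 40])        # event sample indices
event_channels = np.array([0, 1, 0, 1])  # which channel produced each event
event_id = np.array([1, 1, 0, 1])        # 1 = rising edge
sample_rate = 10.0                       # Hz, illustrative

channel = 1
times = (ttls[(event_channels == channel) & (event_id == 1)] / sample_rate) * pq.s
print(times)  # [2. 4.] s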
Example #4
 def _read_eventarray(self, node, parent):
     attributes = self._get_standard_attributes(node)
     times = self._get_quantity(node["times"])
     labels = node["labels"].value
     event = Event(times=times, labels=labels, **attributes)
     event.segment = parent
     return event
Example #5
    def setup_events(self):
        eventname11 = 'event 1 1'
        eventname12 = 'event 1 2'
        eventname21 = 'event 2 1'
        eventname22 = 'event 2 2'

        eventtime11 = 10 * pq.ms
        eventtime12 = 20 * pq.ms
        eventtime21 = 30 * pq.s
        eventtime22 = 40 * pq.s

        self.eventnames1 = [eventname11, eventname12]
        self.eventnames2 = [eventname21, eventname22]
        self.eventnames = [eventname11, eventname12, eventname21, eventname22]

        params1 = {'testattr': True}
        params2 = {'testattr': 5}
        event11 = Event(eventtime11, label=eventname11, name=eventname11,
                        **params1)
        event12 = Event(eventtime12, label=eventname12, name=eventname12,
                        **params2)
        event21 = Event(eventtime21, label=eventname21, name=eventname21)
        event22 = Event(eventtime22, label=eventname22, name=eventname22)

        self.event1 = [event11, event12]
        self.event2 = [event21, event22]
        self.event = [event11, event12, event21, event22]
Example #6
 def setUp(self):
     self.signal = AnalogSignal(np.random.randn(1000, 1),
                                units='V',
                                sampling_rate=1 * pq.Hz)
     self.signal2 = AnalogSignal(np.random.randn(1000, 1),
                                 units='V',
                                 sampling_rate=1 * pq.Hz)
     self.signal_start = 10
     self.signal_end = 10
     self.evt = Event(np.arange(0, 100, 1) * pq.s,
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.evt2 = Event(np.arange(0, 100, 1) * pq.s,
                       labels=np.repeat(np.array(['t2', 't3'], dtype='S'),
                                        50))
     self.evt_start = 15
     self.evt_pre_start = self.evt_start - 5
     self.evt_end = 85
     self.evt_post_end = self.evt_end + 5
     self.not_segment = [100]
     self.segment = Segment()
     self.segment.analogsignals.append(self.signal)
     self.segment.events.append(self.evt)
     self.segment2 = Segment()
     self.segment2.analogsignals.append(self.signal2)
     self.segment2.events.append(self.evt2)
     self.segments = [self.segment, self.segment2]
Example #7
 def _read_eventarray(self, node, parent):
     attributes = self._get_standard_attributes(node)
     times = self._get_quantity(node["times"])
     labels = node["labels"].value.astype('U')
     event = Event(times=times, labels=labels, **attributes)
     event.segment = parent
     return event
Example #8
    def create_event(self, parent=None, name='Event'):
        event = Event([1.0, 2.3, 4.1] * pq.s,
                      np.array([chr(0) + 'trig1', chr(0) + 'trig2',
                                chr(0) + 'trig3']))

        event.segment = parent
        self._assign_basic_attributes(event, name=name)

        return event
Example #9
    def load(self, time_slice=None, strict_slicing=True):
        '''
        *Args*:
            :time_slice: None or tuple of the time slice expressed with quantities.
                            None means the entire signal.
            :strict_slicing: True by default.
                 Controls whether an error is raised when one of the time_slice
                 members (t_start or t_stop) is outside the real time range of
                 the segment.
        '''

        t_start, t_stop = consolidate_time_slice(time_slice, self.t_start,
                                                 self.t_stop, strict_slicing)
        _t_start, _t_stop = prepare_time_slice(time_slice)

        timestamp, durations, labels = self._rawio.get_event_timestamps(
            block_index=self._block_index,
            seg_index=self._seg_index,
            event_channel_index=self._event_channel_index,
            t_start=_t_start,
            t_stop=_t_stop)

        dtype = 'float64'
        times = self._rawio.rescale_event_timestamp(timestamp, dtype=dtype)
        units = 's'

        if durations is not None:
            durations = self._rawio.rescale_epoch_duration(durations,
                                                           dtype=dtype) * pq.s

        h = self._rawio.header['event_channels'][self._event_channel_index]
        if h['type'] == b'event':
            ret = Event(times=times,
                        labels=labels,
                        units='s',
                        name=self.name,
                        file_origin=self.file_origin,
                        description=self.description,
                        **self.annotations)
        elif h['type'] == b'epoch':
            ret = Epoch(times=times,
                        durations=durations,
                        labels=labels,
                        units='s',
                        name=self.name,
                        file_origin=self.file_origin,
                        description=self.description,
                        **self.annotations)

        if time_slice is None:
            ret.array_annotate(**self.array_annotations)
        else:
            # TODO handle array_annotations with time_slice
            pass

        return ret
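A hedged usage sketch for this lazy-loading pattern, using neo's ExampleRawIO as a stand-in reader; the time_slice values are illustrative:

import quantities as pq
from neo.rawio import ExampleRawIO
from neo.io.proxyobjects import EventProxy

reader = ExampleRawIO(filename='fake')
reader.parse_header()
proxy = EventProxy(rawio=reader, event_channel_index=0,
                   block_index=0, seg_index=0)
full_event = proxy.load()                               # whole channel
windowed = proxy.load(time_slice=(1 * pq.s, 5 * pq.s))  # only this window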
Example #10
 def setUp(self):
     self.evt = Event(np.arange(0, 100, 1) * pq.s,
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.evt2 = Event(np.arange(0, 100, 1) * pq.s,
                       labels=np.repeat(np.array(['t2', 't3'], dtype='S'),
                                        50))
     self.events = [self.evt, self.evt2]
     self.evt_start = 10
     self.evt_pre_start = self.evt_start - 5
     self.evt_end = 90
     self.evt_post_end = self.evt_end + 5
Example #11
 def _read_eventarray(self, node, parent):
     attributes = self._get_standard_attributes(node)
     times = self._get_quantity(node["times"])
     if self._lazy:
         labels = np.array((), dtype=node["labels"].dtype)
     else:
         labels = node["labels"].value
     event = Event(times=times, labels=labels, **attributes)
     event.segment = parent
     if self._lazy:
         event.lazy_shape = node["times"].shape
     return event
Example #12
 def _read_eventarray(self, node, parent):
     attributes = self._get_standard_attributes(node)
     times = self._get_quantity(node["times"])
     if self._lazy:
         labels = np.array((), dtype=node["labels"].dtype)
     else:
         labels = node["labels"].value
     event = Event(times=times, labels=labels, **attributes)
     event.segment = parent
     if self._lazy:
         event.lazy_shape = node["times"].shape
     return event
Example #13
 def setUp(self):
     self.evt = Event(times=np.arange(0, 100, 1) * pq.s,
                      name='Ch1',
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.evt2 = Event(times=np.arange(0, 100, 3) * pq.s,
                       name='Ch2',
                       labels=np.repeat(np.array(['t2', 't3'], dtype='S'),
                                        17))
     self.eventlist = [{'ch': 0, 'times': self.evt.times},
                       {'ch': 1, 'times': self.evt2.times}]
     self.df = pd.DataFrame(data=[[1, 0], [1, 1]],
                            index=['start', 'stop'],
                            columns=['Ch1', 'Ch2'])
Example #14
 def setUp(self):
     self.evt = Event(times=np.arange(0, 100, 1) * pq.s,
                      name='Ch1',
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.evt2 = Event(times=np.arange(0, 100, 3) * pq.s,
                       name='Ch2',
                       labels=np.repeat(np.array(['t2', 't3'], dtype='S'),
                                        17))
     self.segment = Segment()
     self.segment.events.append(self.evt)
     self.segment.events.append(self.evt2)
     self.df = pd.DataFrame(data=[[1, 0], [1, 1]],
                            index=['start', 'stop'],
                            columns=['Ch1', 'Ch2'])
Example #15
    def merge(self, other):
        '''
        Merge another :class:`Event` into this one.

        The :class:`Event` objects are concatenated horizontally
        (column-wise, using :func:`np.hstack`).

        If the attributes of the two :class:`Event` objects are not
        compatible, an Exception is raised.
        '''
        othertimes = other.times.rescale(self.times.units)
        times = np.hstack([self.times, othertimes]) * self.times.units
        labels = np.hstack([self.labels, other.labels])
        kwargs = {}
        for name in ("name", "description", "file_origin"):
            attr_self = getattr(self, name)
            attr_other = getattr(other, name)
            if attr_self == attr_other:
                kwargs[name] = attr_self
            else:
                kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)

        merged_annotations = merge_annotations(self.annotations,
                                               other.annotations)
        kwargs.update(merged_annotations)
        return Event(times=times, labels=labels, **kwargs)
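A minimal usage sketch for merge(), assuming neo and quantities are installed; the names and times are illustrative. Note how the right-hand times are rescaled into the left-hand units:

import numpy as np
import quantities as pq
from neo.core import Event

left = Event(times=np.array([1.0, 2.0]) * pq.s,
             labels=np.array(['a', 'b'], dtype='S'), name='left')
right = Event(times=np.array([3000.0]) * pq.ms,
              labels=np.array(['c'], dtype='S'), name='right')
merged = left.merge(right)
print(merged.times)  # [1. 2. 3.] s
print(merged.name)   # "merge(left, right)" because the names differ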
Example #16
def process_events(seg, tolerance):
    if 'Events' not in [cur_evts.name for cur_evts in seg.events]:
        evtlist = list()
        event_times = list()
        event_labels = list()
        for evtarr in seg.events:
            if 'DIn' in evtarr.name:
                evtlist.append(
                    dict(times=evtarr.times, ch=int(evtarr.name[-1]) - 1))
        while any(event_array['times'].size for event_array in evtlist):
            evtlist_non_empty = [x for x in evtlist if x['times'].size]
            first_elements = [x['times'][0] for x in evtlist_non_empty]
            cur_first = np.amin(first_elements) * pq.s
            cur_event = 0
            cur_event_list = [0] * len(evtlist)
            for evtarr in evtlist_non_empty:
                if evtarr['times'][0] - cur_first < tolerance:
                    cur_event_list[evtarr['ch']] = 1
                    evtarr['times'] = np.delete(evtarr['times'], 0) * pq.s
            for bit in cur_event_list:
                cur_event = (cur_event << 1) | bit
            event_times.append(cur_first)
            event_labels.append(cur_event)
            evtlist = evtlist_non_empty
        result = Event(times=np.array(event_times) * pq.s,
                       labels=np.array(event_labels, dtype='S'),
                       name='Events')
        seg.events.append(result)
    else:
        print("Events array already presented!")
Example #17
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))
        cls.populate_dates(blk)

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        cls.populate_dates(seg)
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times,
                                        signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #18
 def setUp(self):
     self.evt = Event(np.arange(0, 100, 1) * pq.s,
                      labels=np.repeat(np.array(['t0', 't1'], dtype='S'),
                                       50))
     self.not_evt = np.random.randn(1000, 1)
     self.evt_start = 10
     self.evt_pre_start = self.evt_start - 5
     self.evt_end = 90
     self.evt_post_end = self.evt_end + 5
Example #19
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #20
 def read_eventarray(self,
                     lazy=False,
                     cascade=True,
                     channel_index=0,
                     t_start=0.,
                     segment_duration=0.):
     """function to read digital timestamps. this function only reads the event
     onset. to get digital event durations, use the epoch function (to be implemented)."""
     if lazy:
         eva = Event(file_origin=self.filename)
     else:
         #create temporary empty lists to store data
         tempNames = list()
         tempTimeStamp = list()
         #get entity from file
         trigEntity = self.fd.get_entity(channel_index)
         #transform t_start into index (reading will start from this index)
         startat = trigEntity.get_index_by_time(
             t_start, 0)  #zero means closest index to value
         #get the last index to read, using segment duration and t_start
         endat = trigEntity.get_index_by_time(
             float(segment_duration + t_start),
             -1)  #-1 means last index before time
         #numIndx = endat-startat
         #run through specified intervals in entity
         for i in range(startat, endat + 1, 1):  #trigEntity.item_count):
             # get in which digital bit the trigger was detected
             tempNames.append(trigEntity.label[-8:])
             # get the time stamps of onset events
             tempData, onOrOff = trigEntity.get_data(i)
             # if this was an onset event, save it to the list
             # on triggered recordings it seems that only onset events are
             # recorded. On continuous recordings both onset (==1)
             # and offset (==255) seem to be recorded
             if onOrOff == 1:
                 # append the time stamp to the list
                 tempTimeStamp.append(tempData)
         # create an event array
         eva = Event(labels=np.array(tempNames, dtype="S"),
                     times=np.array(tempTimeStamp) * pq.s,
                     file_origin=self.filename,
                     description="the trigger events (without durations)")
     return eva
Example #21
    def test_event_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)

        event = Event(times=np.arange(0, 30, 10) * pq.s,
                      labels=np.array(["0", "1", "2"]),
                      name="event name",
                      description="event description")
        seg.events.append(event)
        self.write_and_compare([block])
Example #22
    def read_event(fh, block_id, array_id):
        nix_block = fh.handle.blocks[block_id]
        nix_da = nix_block.data_arrays[array_id]

        params = {
            'times': nix_da[:],  # TODO think about lazy data loading
            'labels': [x.encode('UTF-8') for x in nix_da.dimensions[0].labels]
        }

        name = Reader.Help.get_obj_neo_name(nix_da)
        if name:
            params['name'] = name

        event = Event(**params)

        for key, value in Reader.Help.read_attributes(nix_da.metadata, 'event').items():
            setattr(event, key, value)

        event.annotations = Reader.Help.read_annotations(nix_da.metadata, 'event')

        return event
Example #23
def _filter_event_channel(event_channel: Event, label_filter: Callable[[str], bool]) -> Event:
    # list or ndarray
    labels = event_channel.labels
    # always an ndarray
    times: NPArray = event_channel.times
    matches = (label_filter(labels) if isinstance(labels, NPArray)
               else [label_filter(label) for label in labels])

    new_times = times[matches]
    new_labels = (labels[matches] if isinstance(labels, NPArray)
                  else [label for label in labels if label_filter(label)])
    return Event(times=new_times, labels=new_labels, units=event_channel.units)
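A usage sketch for _filter_event_channel, assuming NPArray aliases numpy.ndarray as the type hints suggest; the labels and times are illustrative. When labels is an ndarray, the filter is applied to the whole array at once, so an elementwise predicate like the one below yields a boolean mask:

import numpy as np
import quantities as pq
from neo.core import Event

evt = Event(times=np.array([0.1, 0.2, 0.3]) * pq.s,
            labels=np.array(['stim', 'rest', 'stim']))
stims = _filter_event_channel(evt, lambda label: label == 'stim')
print(stims.times)  # [0.1 0.3] s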
Example #24
 def test_annotations(self):
     self.testfilename = self.get_filename_path('nixio_fr_ann.nix')
     with NixIO(filename=self.testfilename, mode='ow') as io:
         annotations = {'my_custom_annotation': 'hello block'}
         bl = Block(**annotations)
         annotations = {'something': 'hello hello000'}
         seg = Segment(**annotations)
         an = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='V',
                           sampling_rate=1 * pq.Hz)
         an.annotations['ansigrandom'] = 'hello chars'
         sp = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
         sp.annotations['railway'] = 'hello train'
         ev = Event(np.arange(0, 30, 10) * pq.s,
                    labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
         ev.annotations['venue'] = 'hello event'
         ev2 = Event(np.arange(0, 30, 10) * pq.s,
                     labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
         ev2.annotations['evven'] = 'hello ev'
         seg.spiketrains.append(sp)
         seg.events.append(ev)
         seg.events.append(ev2)
         seg.analogsignals.append(an)
         bl.segments.append(seg)
         io.write_block(bl)
         io.close()
     with NixIOfr(filename=self.testfilename) as frio:
         frbl = frio.read_block()
         assert 'my_custom_annotation' in frbl.annotations
         assert 'something' in frbl.segments[0].annotations
         # assert 'ansigrandom' in frbl.segments[0].analogsignals[0].annotations
         assert 'railway' in frbl.segments[0].spiketrains[0].annotations
         assert 'venue' in frbl.segments[0].events[0].annotations
         assert 'evven' in frbl.segments[0].events[1].annotations
     os.remove(self.testfilename)
Example #25
def _read_main_pulse_file(filepaths: List[str]) -> Event:
    try:
        # read pulse file
        pulse_file = [
            file for file in filepaths
            if "pulses" in os.path.basename(file).lower()
        ][0]
        pulses_df = pd.read_csv(filepath_or_buffer=pulse_file,
                                header=None,
                                names=["timestamp", "comment"])
        times = Quantity(pulses_df["timestamp"], "s")

        pulses = Event(times=times,
                       labels=pulses_df["comment"],
                       name="Dapsys Main Pulse",
                       file_origin=pulse_file)
        channel_id = f"{TypeID.ELECTRICAL_STIMULUS.value}.0"
        pulses.annotate(id=channel_id,
                        type_id=TypeID.ELECTRICAL_STIMULUS.value)

        intervals: Quantity = np.diff(times)
        intervals = quantity_concat(intervals,
                                    np.array([float("inf")]) * second)
        pulses.array_annotate(intervals=intervals)

        return pulses
    except Exception:
        traceback.print_exc()
Example #26
def _new_event(cls,
               signal,
               times=None,
               labels=None,
               units=None,
               name=None,
               file_origin=None,
               description=None,
               annotations=None,
               segment=None):
    '''
    A function to map Event.__new__ to a function that
    does not do the unit checking. This is needed for pickle to work.
    '''
    e = Event(signal=signal,
              times=times,
              labels=labels,
              units=units,
              name=name,
              file_origin=file_origin,
              description=description,
              **annotations)
    e.segment = segment
    return e
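A sketch of the round trip this helper enables; that pickle routes through _new_event (via Event.__reduce__) is an assumption based on the docstring, not shown in this snippet:

import pickle
import numpy as np
import quantities as pq
from neo.core import Event

evt = Event(times=np.array([1.0, 2.0]) * pq.s,
            labels=np.array(['a', 'b'], dtype='S'), name='pickled')
restored = pickle.loads(pickle.dumps(evt))  # presumably rebuilt via _new_event, not __init__
assert (restored.times == evt.times).all()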
Example #27
def random_event(name=None, **annotations):
    size = random.randint(1, 7)
    times = np.cumsum(np.random.uniform(5, 10, size=size))
    labels = [random_string() for i in range(size)]
    if len(annotations) == 0:
        annotations = random_annotations(3)
    obj = Event(
        times=times,
        labels=labels,
        units="ms",
        name=name or random_string(),
        array_annotations=None,   # todo
        **annotations
    )
    return obj
Example #28
    def test_anonymous_objects_write(self):
        nblocks = 2
        nsegs = 2
        nanasig = 4
        nirrseg = 2
        nepochs = 3
        nevents = 4
        nspiketrains = 3
        nchx = 5
        nunits = 10

        times = self.rquant(1, pq.s)
        signal = self.rquant(1, pq.V)
        blocks = []
        for blkidx in range(nblocks):
            blk = Block()
            blocks.append(blk)
            for segidx in range(nsegs):
                seg = Segment()
                blk.segments.append(seg)
                for anaidx in range(nanasig):
                    seg.analogsignals.append(AnalogSignal(signal=signal,
                                                          sampling_rate=pq.Hz))
                for irridx in range(nirrseg):
                    seg.irregularlysampledsignals.append(
                        IrregularlySampledSignal(times=times,
                                                 signal=signal,
                                                 time_units=pq.s)
                    )
                for epidx in range(nepochs):
                    seg.epochs.append(Epoch(times=times, durations=times))
                for evidx in range(nevents):
                    seg.events.append(Event(times=times))
                for stidx in range(nspiketrains):
                    seg.spiketrains.append(SpikeTrain(times=times,
                                                      t_stop=times[-1]+pq.s,
                                                      units=pq.s))
            for chidx in range(nchx):
                chx = ChannelIndex(name="chx{}".format(chidx),
                                   index=[1, 2],
                                   channel_ids=[11, 22])
                blk.channel_indexes.append(chx)
                for unidx in range(nunits):
                    unit = Unit()
                    chx.units.append(unit)
        self.writer.write_all_blocks(blocks)
        self.compare_blocks(blocks, self.reader.blocks)
Example #29
    def test__match_events(self):
        proxy_event = EventProxy(rawio=self.reader, event_channel_index=0,
                                 block_index=0, seg_index=0)

        loaded_event = proxy_event.load()

        regular_event = Event(times=loaded_event.times - 1 * loaded_event.units,
                              labels=np.array(['trigger_a', 'trigger_b'] * 3, dtype='U12'))

        seg = Segment()
        seg.events = [regular_event, proxy_event]

        # test matching two events one of which is a proxy
        matched_regular, matched_proxy = match_events(regular_event, proxy_event)

        assert_same_attributes(matched_regular, regular_event)
        assert_same_attributes(matched_proxy, loaded_event)
Example #30
    def test_multiref_write(self):
        blk = Block("blk1")
        signal = AnalogSignal(name="sig1",
                              signal=[0, 1, 2],
                              units="mV",
                              sampling_period=pq.Quantity(1, "ms"))
        othersignal = IrregularlySampledSignal(name="i1",
                                               signal=[0, 0, 0],
                                               units="mV",
                                               times=[1, 2, 3],
                                               time_units="ms")
        event = Event(name="Evee", times=[0.3, 0.42], units="year")
        epoch = Epoch(name="epoche",
                      times=[0.1, 0.2] * pq.min,
                      durations=[0.5, 0.5] * pq.min)
        st = SpikeTrain(name="the train of spikes",
                        times=[0.1, 0.2, 10.3],
                        t_stop=11,
                        units="us")

        for idx in range(3):
            segname = "seg" + str(idx)
            seg = Segment(segname)
            blk.segments.append(seg)
            seg.analogsignals.append(signal)
            seg.irregularlysampledsignals.append(othersignal)
            seg.events.append(event)
            seg.epochs.append(epoch)
            seg.spiketrains.append(st)

        chidx = ChannelIndex([10, 20, 29])
        seg = blk.segments[0]
        st = SpikeTrain(name="choochoo",
                        times=[10, 11, 80],
                        t_stop=1000,
                        units="s")
        seg.spiketrains.append(st)
        blk.channel_indexes.append(chidx)
        for idx in range(6):
            unit = Unit("unit" + str(idx))
            chidx.units.append(unit)
            unit.spiketrains.append(st)

        self.writer.write_block(blk)
        self.compare_blocks([blk], self.reader.blocks)
Example #31
    def test__add_epoch(self):
        proxy_event = EventProxy(rawio=self.reader, event_channel_index=0,
                                 block_index=0, seg_index=0)

        loaded_event = proxy_event.load()

        regular_event = Event(times=loaded_event.times - 1 * loaded_event.units)

        seg = Segment()
        seg.events = [regular_event, proxy_event]

        # test cutting with two events one of which is a proxy
        epoch = add_epoch(seg, regular_event, proxy_event)

        assert_neo_object_is_compliant(epoch)
        assert_same_annotations(epoch, regular_event)
        assert_arrays_almost_equal(epoch.times, regular_event.times, 1e-12)
        assert_arrays_almost_equal(epoch.durations,
                                   np.ones(regular_event.shape) * loaded_event.units, 1e-12)
Example #32
 def load(self, time_slice=None, strict_slicing=True):
     """
     Load EventProxy args:
         :param time_slice: None or tuple of the time slice expressed with quantities.
                         None is the entire signal.
         :param strict_slicing: True by default.
             Control if an error is raised or not when one of the time_slice members
             (t_start or t_stop) is outside the real time range of the segment.
     """
     if time_slice:
         raise NotImplementedError("todo")
     else:
         times = self._timeseries.timestamps[:]
         labels = self._timeseries.data[:]
     return Event(times * pq.s,
                  labels=labels,
                  name=self.name,
                  description=self.description,
                  **self.annotations)
Example #33
def proc_src_comments(srcfile, filename):
    '''Get the comments in an src file that has been processed by the
    official matlab function. See proc_src for details.'''
    comm_seg = Segment(name='Comments', file_origin=filename)
    commentarray = srcfile['comments'].flatten()[0]
    senders = [res[0] for res in commentarray['sender'].flatten()]
    texts = [res[0] for res in commentarray['text'].flatten()]
    timeStamps = [res[0, 0] for res in commentarray['timeStamp'].flatten()]

    timeStamps = np.array(timeStamps, dtype=np.float32)
    t_start = timeStamps.min()
    timeStamps = pq.Quantity(timeStamps - t_start, units=pq.d).rescale(pq.s)
    texts = np.array(texts, dtype='U')
    senders = np.array(senders, dtype='S')
    t_start = brainwaresrcio.convert_brainwaresrc_timestamp(t_start.tolist())

    comments = Event(times=timeStamps, labels=texts, senders=senders)
    comm_seg.events = [comments]
    comm_seg.rec_datetime = t_start

    return comm_seg
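The timestamp handling above stores comment times as day numbers relative to the first comment, then rescales to seconds. In isolation, with illustrative values:

import numpy as np
import quantities as pq

timeStamps = np.array([730000.0, 730000.5], dtype=np.float32)  # MATLAB-style day numbers
t_start = timeStamps.min()
rel = pq.Quantity(timeStamps - t_start, units=pq.d).rescale(pq.s)
print(rel)  # [0. 43200.] s  (half a day)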
Example #34
    def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                        lazy=True):
        # return SpikeTrain or Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        # # Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit = 0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring(
                    fid.read(blockHeader.items * dt.itemsize), dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 3 convert in neo standard class: eventarrays or spiketrains
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # Spike2 marker is closer to the label sense of neo
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time *
                                       header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                      waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
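The tick-to-seconds conversion used for alltimes above, isolated as a sketch; the header constants are illustrative:

import numpy as np
import quantities as pq

ticks = np.array([100, 250, 400])  # raw 'tick' values from the file
us_per_time = 1.0                  # from the file header (illustrative)
dtime_base = 1e-6                  # base time unit in seconds (illustrative)
alltimes = ticks.astype('f') * us_per_time * dtime_base * pq.s
print(alltimes)  # [1.e-04 2.5e-04 4.e-04] s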
Example #35
    def read_segment(self, block_index=0, seg_index=0, lazy=False,
                     signal_group_mode=None, load_waveforms=False, time_slice=None):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls the behavior for grouping channels in AnalogSignal.
            * 'split-all': each channel will give an AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped in
            a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is None or not.

        :param time_slice: None by default, meaning no limit.
            A time slice is (t_start, t_stop); both are quantities.
            All AnalogSignal, SpikeTrain, Event and Epoch objects will be loaded only within the slice.
        """

        if lazy:
            warnings.warn(
                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
                DeprecationWarning)

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # get only a slice of objects limited by t_start and t_stop time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(
                    block_index, seg_index, channel_indexes) * pq.s

                sig_size = self.get_signal_size(block_index=block_index, seg_index=seg_index,
                                                channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice get: get i_start, i_stop, new sig_t_start
                    if t_stop is not None:
                        i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(block_index=block_index,
                                                             seg_index=seg_index, i_start=i_start,
                                                             i_stop=i_stop,
                                                             channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                        channel_indexes,
                        signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case annotations by channel is OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
                            'signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][chan_index]
                    else:
                        # when channel are grouped by same unit
                        # annotations have channel_names and channel_ids array
                        # this will be moved in array annotations soon
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                        annotations['channel_names'] = signal_channels[ind_abs]['name']
                        annotations['channel_ids'] = signal_channels[ind_abs]['id']
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]), units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within], units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
                                                             seg_index=seg_index,
                                                             unit_index=unit_index,
                                                             t_start=t_start_, t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(raw_waveforms, dtype='float32',
                                                                  unit_index=unit_index)
                wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms, units=wf_units,
                                        dtype='float32', copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
                unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(block_index=block_index,
                                                            seg_index=seg_index,
                                                            unit_index=unit_index,
                                                            t_start=t_start_, t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times, units='s', copy=False,
                                  t_start=seg_t_start, t_stop=seg_t_stop,
                                  waveforms=waveforms, left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate, **annotations)
            else:
                nb = self.spike_count(block_index=block_index, seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]), units='s', copy=False, t_start=seg_t_start,
                                  t_stop=seg_t_stop, **annotations)
                sptr.lazy_shape = (nb,)

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index, event_channel_index=chan_ind,
                    t_start=t_start_, t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index, seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb,)
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][
                chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times, labels=ev_labels, units='s', copy=False, **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times, durations=ev_durations, labels=ev_labels,
                          units='s', copy=False, **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
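The time-slice handling above reduces to converting quantity times into integer sample indices and back; a standalone sketch with illustrative values:

import quantities as pq

sr = 1000.0 * pq.Hz         # sampling rate
sig_t_start = 0.0 * pq.s    # signal start
t_start, t_stop = 0.25 * pq.s, 0.75 * pq.s

i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)  # 250
i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)    # 750
new_sig_t_start = sig_t_start + (i_start / sr).rescale('s')      # 0.25 s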
Example #36
    def read_segment(self,
                     # the first two keyword arguments are imposed by the neo.io API
                     lazy=False,
                     cascade=True,
                     # all following arguments are decided by this IO and are free
                     segment_duration=15.,
                     num_analogsignal=4,
                     num_spiketrain_by_channel=3,
                     ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just a example to be adapted to each ClassIO.
        In this case these 3 paramters are  taken in account because this function
        return a generated segment with fake AnalogSignal and fake SpikeTrain.

        Parameters:
            segment_duration :is the size in secend of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment

        """

        sampling_rate = 10000.  # Hz
        t_start = -1.

        # time vector for the generated signal
        timevect = np.arange(t_start, t_start + segment_duration, 1. / sampling_rate)

        # create an empty segment
        seg = Segment(name='it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal(lazy=lazy, cascade=cascade,
                                             channel_index=i,
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                seg.analogsignals += [ana]

            # read nested spiketrains
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(lazy=lazy, cascade=cascade,
                                                segment_duration=segment_duration,
                                                t_start=t_start, channel_index=i)
                    seg.spiketrains += [sptr]


            # create an Event that mimics triggers.
            # note that ExampleIO does not allow direct access to Event;
            # for that you need read_segment(cascade=True)

            if lazy:
                # in the lazy case no data are read; eva is empty
                eva = Event()
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as its unit
                eva = Event(timevect[(np.random.rand(n) * timevect.size).astype('i')] * pq.s)
                # all durations are the same
                eva.durations = np.ones(n) * 500 * pq.ms  # Event doesn't have durations; is Epoch intended here?
                # labels
                labels = []
                for i in range(n):
                    if np.random.rand() > .6:
                        labels.append('TriggerA')
                    else:
                        labels.append('TriggerB')
                eva.labels = np.array(labels)

            seg.events += [eva]

        seg.create_many_to_one_relationship()
        return seg
Example #37
    def test__issue_285(self):
        # Spiketrain
        train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
        unit = Unit()
        train.unit = unit
        unit.spiketrains.append(train)

        epoch = Epoch(np.array([0, 10, 20]),
                      np.array([2, 2, 2]),
                      np.array(["a", "b", "c"]),
                      units="ms")

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        seg.epochs.append(epoch)
        epoch.segment = seg
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
        self.assertIsInstance(r_seg.epochs[0], Epoch)
        os.remove('blk.pkl')

        # Epoch
        epoch = Epoch(times=np.arange(0, 30, 10) * pq.s,
                      durations=[10, 5, 7] * pq.ms,
                      labels=np.array(['btn0', 'btn1', 'btn2'], dtype='S'))
        epoch.segment = Segment()
        blk = Block()
        seg = Segment()
        seg.epochs.append(epoch)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.epochs[0].segment, Segment)
        os.remove('blk.pkl')

        # Event
        event = Event(np.arange(0, 30, 10) * pq.s,
                      labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
        event.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.events.append(event)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.events[0].segment, Segment)
        os.remove('blk.pkl')

        # IrregularlySampledSignal
        signal = IrregularlySampledSignal(
            [0.0, 1.23, 6.78], [1, 2, 3], units='mV', time_units='ms')
        signal.segment = Segment()

        blk = Block()
        seg = Segment()
        seg.irregularlysampledsignals.append(signal)
        blk.segments.append(seg)
        blk.segments[0].block = blk

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.irregularlysampledsignals[0].segment, Segment)
        os.remove('blk.pkl')
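
Every branch of the test above exercises the same round trip: build a Block, write it with PickleIO, read it back, and check that child-to-parent links survive pickling. A condensed sketch of that pattern, assuming neo and quantities are installed ('roundtrip.pkl' is a hypothetical scratch file):

import os
import numpy as np
import quantities as pq
from neo.core import Block, Segment, Event
from neo.io import PickleIO

blk = Block()
seg = Segment()
evt = Event(np.arange(0, 30, 10) * pq.s,
            labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
evt.segment = seg  # the parent link that must survive the round trip
seg.events.append(evt)
blk.segments.append(seg)

PickleIO(filename='roundtrip.pkl').write(blk)
r_blk = PickleIO(filename='roundtrip.pkl').read_block()
assert isinstance(r_blk.segments[0].events[0].segment, Segment)
os.remove('roundtrip.pkl')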
Exemplo n.º 38
0
def generate_one_simple_segment(seg_name='segment 0', supported_objects=[], nb_analogsignal=4,
                                t_start=0. * pq.s, sampling_rate=10 * pq.kHz, duration=6. * pq.s,
                                nb_spiketrain=6, spikerate_range=[.5 * pq.Hz, 12 * pq.Hz],
                                event_types={'stim': ['a', 'b', 'c', 'd'],
                                             'enter_zone': ['one', 'two'],
                                             'color': ['black', 'yellow', 'green'], },
                                event_size_range=[5, 20],
                                epoch_types={'animal state': ['Sleep', 'Freeze', 'Escape'],
                                             'light': ['dark', 'lighted']},
                                # durations in seconds as plain floats; they are wrapped
                                # in pq.Quantity(..., units=pq.s) when the Epoch is built
                                epoch_duration_range=[.5, 3.],
                                array_annotations={'valid': np.array([True, False]),
                                                   'number': np.array(range(5))}
                                ):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            # .simplified is needed: without it int() takes the magnitude in kHz*s
            anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)),
                                  sampling_rate=sampling_rate,
                                  t_start=t_start, units=pq.mV, channel_index=a,
                                  name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand() * np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            # spikedata = rand(int((spikerate*duration).simplified))*duration
            # sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate * duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes * duration, t_start=t_start, t_stop=t_start + duration)
            sptr.annotations['channel_index'] = s
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(spikes)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            sptr.array_annotate(**arr_ann)
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in event_types.items():
            evt_size = rand() * np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size) * len(labels)).astype('i')]
            evt = Event(times=rand(evt_size) * duration, labels=labels)
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(evt_size) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            evt.array_annotate(**arr_ann)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in epoch_types.items():
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand() * (epoch_duration_range[1] - epoch_duration_range[0])
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t + dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times)) * len(labels)).astype('i')]
            assert len(times) == len(durations)
            assert len(times) == len(labels)
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity(durations, units=pq.s),
                        labels=labels,)
            assert epc.times.dtype == 'float'
            # Randomly generate array_annotations from given options
            arr_ann = {key: value[(rand(len(times)) * len(value)).astype('i')] for (key, value) in
                       array_annotations.items()}
            epc.array_annotate(**arr_ann)
            seg.epochs.append(epc)

    # TODO : Spike, Event

    seg.create_many_to_one_relationship()
    return seg
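
A short usage sketch for the generator above; the object list and counts are arbitrary, and it assumes the function and the Neo classes it references are importable:

from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch

seg = generate_one_simple_segment(
    seg_name='demo segment',
    supported_objects=[Segment, AnalogSignal, SpikeTrain, Event, Epoch],
    nb_analogsignal=2,
    nb_spiketrain=3)
print(len(seg.analogsignals), len(seg.spiketrains),
      len(seg.events), len(seg.epochs))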
Exemplo n.º 39
0
    def read_segment(self, cascade=True, lazy=False):
        """
        Arguments:
        """
        f = StructFile(open(self.filename, 'rb'))

        # Name
        f.seek(64, 0)
        # names are space-padded; rstrip also handles the empty-string case safely
        surname = f.read(22).decode('ascii').rstrip(' ')
        firstname = f.read(20).decode('ascii').rstrip(' ')

        #Date
        f.seek(128, 0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
                                         sec)

        f.seek(138, 0)
        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
            'IHHHH')
        #~ print Num_Chan, Bytes

        #header version
        f.seek(175, 0)
        header_version, = f.read_f('b')
        assert header_version == 4

        seg = Segment(name=str(firstname + ' ' + surname),
                      file_origin=os.path.basename(self.filename))
        seg.annotate(surname=surname)
        seg.annotate(firstname=firstname)
        seg.annotate(rec_datetime=rec_datetime)

        if not cascade:
            f.close()
            return seg

        # area
        f.seek(176, 0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
                      'IMPED_E', 'MONTAGE',
                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
                      'EVENT B', 'TRIGGER']
        zones = {}
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length

        # reading raw data
        if not lazy:
            f.seek(Data_Start_Offset, 0)
            rawdata = np.frombuffer(f.read(), dtype='u' + str(Bytes))
            rawdata = rawdata.reshape((-1, Num_Chan))

        # Reading Code Info
        zname2, pos, length = zones['ORDER']
        f.seek(pos, 0)
        code = np.frombuffer(f.read(Num_Chan * 2), dtype='u2', count=Num_Chan)

        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
                 101: pq.dimensionless, 102: pq.dimensionless}

        for c in range(Num_Chan):
            zname2, pos, length = zones['LABCOD']
            f.seek(pos + code[c] * 128 + 2, 0)

            label = f.read(6).strip(b"\x00").decode('ascii')
            ground = f.read(6).strip(b"\x00").decode('ascii')
            (logical_min, logical_max, logical_ground, physical_min,
             physical_max) = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys():
                unit = units[k]
            else:
                unit = pq.uV

            f.seek(8, 1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min

            if lazy:
                signal = [] * unit
            else:
                factor = float(physical_max - physical_min) / float(
                    logical_max - logical_min + 1)
                signal = (rawdata[:, c].astype(
                    'f') - logical_ground) * factor * unit

            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   name=str(label), channel_index=c)
            if lazy:
                ana_sig.lazy_shape = None
            ana_sig.annotate(ground=ground)

            seg.analogsignals.append(ana_sig)

        sampling_rate = np.mean(
            [ana_sig.sampling_rate for ana_sig in seg.analogsignals]) * pq.Hz

        # Read trigger and notes
        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            triggers = np.frombuffer(f.read(length),
                                     dtype=[('pos', 'u4'), ('label', label_dtype)])
            if not lazy:
                keep = (triggers['pos'] >= triggers['pos'][0]) & (
                    triggers['pos'] < rawdata.shape[0]) & (
                    triggers['pos'] != 0)
                triggers = triggers[keep]
                ea = Event(name=zname[0] + zname[1:].lower(),
                           labels=triggers['label'].astype('S'),
                           times=(triggers['pos'] / sampling_rate).rescale('s'))
            else:
                ea = Event(name=zname[0] + zname[1:].lower())
                ea.lazy_shape = triggers.size
            seg.events.append(ea)

        # Read Event A and B
        # not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos, 0)
            epochs = np.frombuffer(f.read(length),
                                   dtype=[('label', 'u4'), ('start', 'u4'),
                                          ('stop', 'u4')])
            if not lazy:
                keep = (epochs['start'] > 0) & (
                    epochs['start'] < rawdata.shape[0]) & (
                    epochs['stop'] < rawdata.shape[0])
                epochs = epochs[keep]
                ep = Epoch(name=zname[0] + zname[1:].lower(),
                           labels=epochs['label'].astype('S'),
                           times=(epochs['start'] / sampling_rate).rescale('s'),
                           durations=((epochs['stop'] - epochs['start']) / sampling_rate).rescale('s'))
            else:
                ep = Epoch(name=zname[0] + zname[1:].lower())
                ep.lazy_shape = epochs.size
            seg.epochs.append(ep)

        seg.create_many_to_one_relationship()
        f.close()
        return seg
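
Both the trigger and epoch branches above rely on the same conversion: integer sample positions divided by the sampling rate, then rescaled to seconds. The conversion in isolation, with made-up values:

import numpy as np
import quantities as pq

sampling_rate = 256 * pq.Hz                  # hypothetical rate
start = np.array([128, 512])                 # hypothetical start samples
stop = np.array([384, 1024])                 # hypothetical stop samples
times = (start / sampling_rate).rescale('s')               # [0.5, 2.0] s
durations = ((stop - start) / sampling_rate).rescale('s')  # [1.0, 2.0] s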
Exemplo n.º 40
0
    def read_segment(self, blockname=None, lazy=False, cascade=True, sortname=''):
        """
        Read a single segment from the tank. Note that TDT blocks are Neo
        segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
        refers to the TDT block's name, which will be the Neo segment name.
        sortname is used to specify the external sortcode generated by offline spike sorting, if sortname=='PLX',
        there should be a ./sort/PLX/*.SortResult file in the tdt block, which stores the sortcode for every spike,
        default to '', which uses the original online sort
        """
        if not blockname:
            blockname = os.listdir(self.dirname)[0]

        if blockname == 'TempBlk': return None

        if not self.is_tdtblock(blockname): return None    # if not a tdt block

        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir): return None

        seg = Segment(name=blockname)

        tankname = os.path.basename(self.dirname)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
        dt = [('size', 'int32'),
              ('evtype', 'int32'),
              ('code', 'S4'),
              ('channel', 'uint16'),
              ('sortcode', 'uint16'),
              ('timestamp', 'float64'),
              ('eventoffset', 'int64'),
              ('dataformat', 'int32'),
              ('frequency', 'float32')]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        #0x8801: 'EVTYPE_MARK' give the global_start
        global_t_start = tsq[tsq['evtype']==0x8801]['timestamp'][0]

        #TEV is the old data file
        try:
            tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
            #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        except IOError:
            tev_filename = None


        # if an external sortcode exists in ./sort/[sortname]/*.SortResult
        # (generated by offline sorting), use it
        sortresult_filename = None
        if sortname != '':
            try:
                for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                    if file.endswith(".SortResult"):
                        sortresult_filename = os.path.join(subdir, 'sort', sortname, file)

                        # get the new sortcode; the first 1024 bytes are the file header
                        newsortcode = np.fromfile(sortresult_filename, 'int8')[1024:]
                        # update the sortcode with the info from this file
                        tsq['sortcode'][1:-1] = newsortcode
                        break
            except OSError:
                sortresult_filename = None
            except IOError:
                sortresult_filename = None


        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype']==type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code']==code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel']==channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [ ]*pq.s
                            labels = np.array([ ], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                        ea = Event(times=times,
                                   name=code,
                                   channel_index=int(channel),
                                   labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.events.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode']==sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0]-10
                            if lazy:
                                times = [ ]*pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(tsq[mask4]['size'],tsq[mask4]['eventoffset'], tev_array).view(dt)
                                waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                                # t_start = (tsq['timestamp'][0] - global_t_start) * pq.s
                                # (this should work but does not, hence 0 is used)
                                t_start = 0 * pq.s
                                t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
                            else:
                                t_start = 0 * pq.s
                                t_stop = 0 * pq.s
                            st = SpikeTrain(times           = times,
                                            name            = 'Chan{0} Code{1}'.format(channel,sortcode),
                                            t_start         = t_start,
                                            t_stop          = t_stop,
                                            waveforms       = waveforms,
                                            left_sweep      = waveformsize/2./sr * pq.s,
                                            sampling_rate   = sr * pq.Hz,
                                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size']-10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = [ ]
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(channel)+'.sev')
                            try:
                                #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename, dtype='uint8')
                            except IOError:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],tsq[mask3]['eventoffset'],  sig_array).view(dt)

                        anasig = AnalogSignal(signal        = signal* pq.V,
                                              name          = '{0} {1}'.format(code, channel),
                                              sampling_rate = sr * pq.Hz,
                                              t_start       = (tsq[mask3]['timestamp'][0] - global_t_start) * pq.s,
                                              channel_index = int(channel)
                                              )
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)
        return seg
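
A usage sketch for the reader above, assuming it is exposed as the legacy TdtIO class taking a dirname; 'data/MyTank' and 'Block-1' are hypothetical names:

from neo.io import TdtIO  # assumes the legacy TdtIO API shown above

r = TdtIO(dirname='data/MyTank')            # hypothetical tank directory
seg = r.read_segment(blockname='Block-1')   # hypothetical TDT block name
print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))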
Exemplo n.º 41
0
    def read_segment(self, lazy=False, cascade=True):

        # read the header file (.vhdr)
        header = read_brain_soup(self.filename)

        if header['Common Infos']['DataFormat'] != 'BINARY':
            raise NotImplementedError('only BINARY DataFormat is supported')
        if header['Common Infos']['DataOrientation'] != 'MULTIPLEXED':
            raise NotImplementedError('only MULTIPLEXED DataOrientation is supported')
        nb_channel = int(header['Common Infos']['NumberOfChannels'])
        sampling_rate = 1.e6 / float(
            header['Common Infos']['SamplingInterval']) * pq.Hz

        fmt = header['Binary Infos']['BinaryFormat']
        fmts = {'INT_16': np.int16, 'INT_32': np.int32, 'IEEE_FLOAT_32': np.float32}
        if fmt not in fmts:
            raise NotImplementedError('unsupported BinaryFormat: %s' % fmt)
        dt = fmts[fmt]

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        # read binary
        if not lazy:
            binary_file = os.path.splitext(self.filename)[0] + '.eeg'
            sigs = np.memmap(binary_file, dt, 'r').astype('f')

            n = int(sigs.size / nb_channel)
            sigs = sigs[:n * nb_channel]
            sigs = sigs.reshape(n, nb_channel)

        for c in range(nb_channel):
            name, ref, res, units = header['Channel Infos'][
                'Ch%d' % (c + 1,)].split(',')
            units = pq.Quantity(1, units.replace('µ', 'u'))
            if lazy:
                signal = [] * units
            else:
                signal = sigs[:, c] * units
                if dt == np.int16 or dt == np.int32:
                    signal *= float(res)
            anasig = AnalogSignal(signal=signal,
                                  channel_index=c,
                                  name=name,
                                  sampling_rate=sampling_rate,
                                  )
            if lazy:
                anasig.lazy_shape = -1
            seg.analogsignals.append(anasig)

        # read marker
        marker_file = os.path.splitext(self.filename)[0] + '.vmrk'
        all_info = read_brain_soup(marker_file)['Marker Infos']
        all_types = []
        times = []
        labels = []
        for i in range(len(all_info)):
            type_, label, pos, size, channel = all_info[
                'Mk%d' % (i + 1,)].split(',')[:5]
            all_types.append(type_)
            times.append(float(pos) / sampling_rate.magnitude)
            labels.append(label)
        all_types = np.array(all_types)
        times = np.array(times) * pq.s
        labels = np.array(labels, dtype='S')
        for type_ in np.unique(all_types):
            ind = type_ == all_types
            if lazy:
                ea = Event(name=str(type_))
                ea.lazy_shape = -1
            else:
                ea = Event(
                    times=times[ind], labels=labels[ind], name=str(type_))
            seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
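
The marker loop above first flattens every 'MkN' entry into parallel arrays, then builds one Event per marker type with boolean masks. The grouping step on its own, with made-up markers:

import numpy as np
import quantities as pq
from neo.core import Event

all_types = np.array(['Stimulus', 'Response', 'Stimulus'])
times = np.array([0.10, 0.25, 0.90]) * pq.s
labels = np.array(['S  1', 'R  1', 'S  2'], dtype='S')

events = [Event(times=times[all_types == t],
                labels=labels[all_types == t],
                name=str(t))
          for t in np.unique(all_types)]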
Exemplo n.º 42
0
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : load or not waveform of spikes (default True)

        """

        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(global_header['Year'],
                                             global_header['Month'],
                                             global_header['Day'],
                                             global_header['Hour'],
                                             global_header['Minute'],
                                             global_header['Second'])
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=global_header['Version'])

        for key, val in iteritems(global_header):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(global_header['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(global_header['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(global_header['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(
                offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = \
                slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        # integer dtypes so counts and positions can be used as slice bounds below
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

        # eventarrays
        nb_events = {}
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
                'NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                #event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                #continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocate memory, then run a second loop to read the data if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit],
                                                   dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros(
                        (nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                    offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
                    'NumberOfWordsInWaveform']
                time = dataBlockHeader['UpperByteOf5ByteTimestamp'] * \
                    2. ** 32 + dataBlockHeader['TimeStamp']
                time /= global_header['ADFrequency']

                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    #spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.frombuffer(
                            fid.read(n1 * n2 * 2), dtype='i2'
                        ).reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1

                elif dataBlockHeader['Type'] == 5:
                    #signal
                    data = np.frombuffer(
                        fid.read(n2 * 2), dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:
                                    sample_positions[chan]+data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4: create neo object
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = Event(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.events.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if global_header['Version'] == 100 or global_header[
                        'Version'] == 101:
                    gain = 5000. / (
                        2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
                elif global_header['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] *
                                    slowChannelHeaders[chan]['PreampGain'])
                elif global_header['Version'] >= 103:
                    gain = global_header['SlowMaxMagnitudeMV'] / (
                        .5 * (2 ** global_header['BitsPerSpikeSample']) *
                        slowChannelHeaders[chan]['Gain'] *
                        slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(
                    slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]['Channel'],
                channel_name=slowChannelHeaders[chan]['Name'])
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if global_header['Version'] < 103:
                        gain = 3000. / (
                            2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
                    elif global_header['Version'] >= 103 and global_header[
                            'Version'] < 105:
                        gain = global_header['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** (global_header['BitsPerSpikeSample']) *
                            1000.)
                    elif global_header['Version'] > 105:
                        gain = global_header['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** (global_header['BitsPerSpikeSample']) *
                            global_header['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s',
                t_stop=t_stop * pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
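
The reader above is organized as two passes over the data blocks: the first pass only counts items per channel and unit so that arrays can be preallocated, then the file position is rewound and a second pass fills them. The same idea on a toy stream of (type, size) records:

import numpy as np

stream = [('spk', 3), ('evt', 1), ('spk', 2)]   # stand-ins for block headers

# pass 1: count items per block type
counts = {}
for kind, n in stream:
    counts[kind] = counts.get(kind, 0) + n

# pass 2: preallocate, then fill while tracking a write position per type
buffers = {kind: np.empty(total) for kind, total in counts.items()}
pos = dict.fromkeys(counts, 0)
for kind, n in stream:
    buffers[kind][pos[kind]:pos[kind] + n] = np.arange(n)  # payload stand-in
    pos[kind] += n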
Exemplo n.º 43
0
    def read_block(self, lazy=False, cascade=True):

        header = self.read_header()
        version = header['fFileVersionNumber']

        bl = Block()
        bl.file_origin = os.path.basename(self.filename)
        bl.annotate(abf_version=str(version))

        # date and time
        if version < 2.:
            YY = 1900
            MM = 1
            DD = 1
            hh = int(header['lFileStartTime'] / 3600.)
            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        elif version >= 2.:
            YY = int(header['uFileStartDate'] / 10000)
            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
            ms = int(np.mod(ss, 1) * 1e6)
            ss = int(ss)
        bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)

        if not cascade:
            return bl

        # file format
        if header['nDataFormat'] == 0:
            dt = np.dtype('i2')
        elif header['nDataFormat'] == 1:
            dt = np.dtype('f4')

        if version < 2.:
            nbchannel = header['nADCNumChannels']
            head_offset = header['lDataSectionPtr'] * BLOCKSIZE + header[
                'nNumPointsIgnored'] * dt.itemsize
            totalsize = header['lActualAcqLength']
        elif version >= 2.:
            nbchannel = header['sections']['ADCSection']['llNumEntries']
            head_offset = header['sections']['DataSection'][
                'uBlockIndex'] * BLOCKSIZE
            totalsize = header['sections']['DataSection']['llNumEntries']

        data = np.memmap(self.filename, dt, 'r',
                         shape=(totalsize,), offset=head_offset)

        # 3 possible modes
        if version < 2.:
            mode = header['nOperationMode']
        elif version >= 2.:
            mode = header['protocol']['nOperationMode']

        if mode in [1, 2, 3, 5]:
            # event-driven variable-length mode (mode 1)
            # event-driven fixed-length mode (mode 2 or 5)
            # gap-free mode (mode 3), which can span several episodes

            # read sweep pos
            if version < 2.:
                nbepisod = header['lSynchArraySize']
                offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
            elif version >= 2.:
                nbepisod = header['sections']['SynchArraySection'][
                    'llNumEntries']
                offset_episode = header['sections']['SynchArraySection'][
                    'uBlockIndex'] * BLOCKSIZE
            if nbepisod > 0:
                episode_array = np.memmap(
                    self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
                    shape=nbepisod, offset=offset_episode)
            else:
                episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
                episode_array[0]['len'] = data.size
                episode_array[0]['offset'] = 0

            # sampling_rate
            if version < 2.:
                sampling_rate = 1. / (header['fADCSampleInterval'] *
                                      nbchannel * 1.e-6) * pq.Hz
            elif version >= 2.:
                sampling_rate = 1.e6 / \
                    header['protocol']['fADCSequenceInterval'] * pq.Hz

            # construct block
            # one sweep = one segment in a block
            pos = 0
            for j in range(episode_array.size):
                seg = Segment(index=j)

                length = episode_array[j]['len']

                if version < 2.:
                    fSynchTimeUnit = header['fSynchTimeUnit']
                elif version >= 2.:
                    fSynchTimeUnit = header['protocol']['fSynchTimeUnit']

                if (fSynchTimeUnit != 0) and (mode == 1):
                    # cast back to int so 'length' stays usable as a slice bound
                    length = int(length / fSynchTimeUnit)

                if not lazy:
                    subdata = data[pos:pos+length]
                    subdata = subdata.reshape((int(subdata.size/nbchannel),
                                               nbchannel)).astype('f')
                    if dt == np.dtype('i2'):
                        if version < 2.:
                            reformat_integer_v1(subdata, nbchannel, header)
                        elif version >= 2.:
                            reformat_integer_v2(subdata, nbchannel, header)

                pos += length

                if version < 2.:
                    chans = [chan_num for chan_num in
                             header['nADCSamplingSeq'] if chan_num >= 0]
                else:
                    chans = range(nbchannel)
                for n, i in enumerate(chans[:nbchannel]):  # fix SamplingSeq
                    if version < 2.:
                        name = header['sADCChannelName'][i].replace(b' ', b'')
                        unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')  # \xb5 is µ
                        num = header['nADCPtoLChannelMap'][i]
                    elif version >= 2.:
                        lADCIi = header['listADCInfo'][i]
                        name = lADCIi['ADCChNames'].replace(b' ', b'')
                        unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
                            replace(b' ', b'').decode('utf-8')
                        num = header['listADCInfo'][i]['nADCNum']
                    if fSynchTimeUnit == 0:
                        t_start = float(episode_array[j]['offset']) / sampling_rate
                    else:
                        t_start = float(episode_array[j]['offset']) * fSynchTimeUnit * 1e-6 * pq.s
                    t_start = t_start.rescale('s')
                    try:
                        pq.Quantity(1, unit)
                    except Exception:
                        unit = ''

                    if lazy:
                        signal = [] * pq.Quantity(1, unit)
                    else:
                        signal = pq.Quantity(subdata[:, n], unit)

                    anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                          t_start=t_start,
                                          name=str(name),
                                          channel_index=int(num))
                    if lazy:
                        anaSig.lazy_shape = length / nbchannel
                    seg.analogsignals.append(anaSig)
                bl.segments.append(seg)

            if mode in [3, 5]:  # TODO: check whether tags exist in other modes
                # tags form an Event that should arguably be attached to the
                # Block; here it is attached to the first Segment
                times = []
                labels = []
                comments = []
                for tag in header['listTag']:
                    times.append(tag['lTagTime']/sampling_rate)
                    labels.append(str(tag['nTagType']))
                    comments.append(clean_string(tag['sComment']))
                times = np.array(times)
                labels = np.array(labels, dtype='S')
                comments = np.array(comments, dtype='S')
                # attach all tags to the first segment.
                seg = bl.segments[0]
                if lazy:
                    ea = Event(times=[] * pq.s, labels=np.array([], dtype='S'))
                    ea.lazy_shape = len(times)
                else:
                    ea = Event(times=times * pq.s, labels=labels,
                               comments=comments)
                seg.events.append(ea)

        bl.create_many_to_one_relationship()
        return bl
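
The version >= 2 branch above decodes the date from a packed YYYYMMDD integer and the start time from milliseconds since midnight. An equivalent divmod formulation of the same arithmetic, with hypothetical header values:

import datetime

uFileStartDate = 20240131        # hypothetical YYYYMMDD header value
uFileStartTimeMS = 34200500      # hypothetical ms since midnight (09:30:00.500)

YY, rem = divmod(uFileStartDate, 10000)
MM, DD = divmod(rem, 100)
ss_total, ms = divmod(uFileStartTimeMS, 1000)
hh, rem = divmod(ss_total, 3600)
mm, ss = divmod(rem, 60)
rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms * 1000)
print(rec_datetime)              # 2024-01-31 09:30:00.500000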
Exemplo n.º 44
0
    def read_segment(self, lazy=False, cascade=True):

        # read the header file (.ent)

        f = open(self.filename + '.ent', 'r')
        #version
        version = f.readline()
        if version[:2] != 'V2' and version[:2] != 'V3':
            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
                               version[:2])

        #info
        info1 = f.readline()[:-1]
        info2 = f.readline()[:-1]

        # the datetime is spread over two oddly formatted lines
        #line1
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        YY, MM, DD, hh, mm, ss = (None, ) * 6
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]

        #line2
        l = f.readline()
        r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
        r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
        r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
        if len(r1) != 0:
            DD, MM, YY, hh, mm, ss = r1[0]
        elif len(r2) != 0:
            hh, mm, ss = r2[0]
        elif len(r3) != 0:
            DD, MM, YY = r3[0]
        try:
            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
                                             int(hh), int(mm), int(ss))
        except (TypeError, ValueError):
            fulldatetime = None

        seg = Segment(file_origin=os.path.basename(self.filename),
                      elan_version=version,
                      info1=info1,
                      info2=info2,
                      rec_datetime=fulldatetime)

        if not cascade:
            return seg

        # skip three unused lines
        l = f.readline()
        l = f.readline()
        l = f.readline()

        # sampling period in seconds; invert to get the sampling rate
        l = f.readline()
        sampling_rate = 1. / float(l) * pq.Hz

        # nb channel
        l = f.readline()
        nbchannel = int(l) - 2

        #channel label
        labels = []
        for c in range(nbchannel + 2):
            labels.append(f.readline()[:-1])

        # channel type
        types = []
        for c in range(nbchannel + 2):
            types.append(f.readline()[:-1])

        # channel unit
        units = []
        for c in range(nbchannel + 2):
            units.append(f.readline()[:-1])
        #print units

        #range
        min_physic = []
        for c in range(nbchannel + 2):
            min_physic.append(float(f.readline()))
        max_physic = []
        for c in range(nbchannel + 2):
            max_physic.append(float(f.readline()))
        min_logic = []
        for c in range(nbchannel + 2):
            min_logic.append(float(f.readline()))
        max_logic = []
        for c in range(nbchannel + 2):
            max_logic.append(float(f.readline()))

        #info filter
        info_filter = []
        for c in range(nbchannel + 2):
            info_filter.append(f.readline()[:-1])

        f.close()

        #raw data
        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
        data = np.fromfile(self.filename, dtype='i' + str(n))
        data = data.byteswap().reshape(
            (data.size // (nbchannel + 2), nbchannel + 2)).astype('f4')
        for c in range(nbchannel):
            if lazy:
                sig = []
            else:
                sig = (data[:, c] - min_logic[c]) / (
                    max_logic[c] - min_logic[c]) * \
                    (max_physic[c] - min_physic[c]) + min_physic[c]

            try:
                unit = pq.Quantity(1, units[c])
            except Exception:
                unit = pq.Quantity(1, '')

            ana_sig = AnalogSignal(
                sig * unit, sampling_rate=sampling_rate,
                t_start=0. * pq.s, name=labels[c], channel_index=c)
            if lazy:
                ana_sig.lazy_shape = data.shape[0]
            ana_sig.annotate(channel_name=labels[c])
            seg.analogsignals.append(ana_sig)

        # triggers
        f = open(self.filename + '.pos')
        times = []
        labels = []
        reject_codes = []
        for l in f.readlines():
            r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
            times.append(float(r[0][0]) / sampling_rate.magnitude)
            labels.append(str(r[0][1]))
            reject_codes.append(str(r[0][2]))
        if lazy:
            times = [] * pq.s  # seconds, not pq.S (siemens)
            labels = np.array([], dtype='S')
            reject_codes = []
        else:
            times = np.array(times) * pq.s
            labels = np.array(labels)
            reject_codes = np.array(reject_codes)
        ea = Event(times=times, labels=labels, reject_codes=reject_codes)
        if lazy:
            ea.lazy_shape = len(times)
        seg.events.append(ea)

        f.close()

        seg.create_many_to_one_relationship()
        return seg
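
The per-channel scaling above maps the logical (integer) sample range linearly onto the physical range read from the header. The formula in isolation, with hypothetical ranges:

import numpy as np

raw = np.array([0., 32767., 65535.], dtype='f4')  # hypothetical raw samples
min_logic, max_logic = 0., 65535.
min_physic, max_physic = -200., 200.              # hypothetical physical range

sig = (raw - min_logic) / (max_logic - min_logic) \
    * (max_physic - min_physic) + min_physic
print(sig)  # approximately [-200., 0., 200.]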
Exemplo n.º 45
0
    def read_segment(self, import_neuroshare_segment=True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: if True, neuroshare segment entities are
                imported as SpikeTrains with associated waveforms; if False,
                they are not imported at all.
        """
        seg = Segment(file_origin=os.path.basename(self.filename))

        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)
        
        #elif sys.platform.startswith('darwin'):
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0:  # TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:  # CSV
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:  # 8 bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:  # 16 bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:  # 32 bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name=str(entityInfo.szEntityLabel))
                if not lazy:
                    times = []
                    labels = []
                    for dwIndex in range(entityInfo.dwItemCount):
                        neuroshare.ns_GetEventData(hFile, dwEntityID, dwIndex,
                                                   ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                                   ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times * pq.s
                    ea.labels = np.array(labels, dtype='S')
                else:
                    ea.lazy_shape = entityInfo.dwItemCount
                # modern Neo keeps events in seg.events (seg.eventarrays is the old name)
                seg.events.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer = ctypes.create_string_buffer(b" " * 256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    # 'times' is not defined yet in the lazy branch, so use an
                    # empty quantity array instead
                    sptr = SpikeTrain([] * pq.s, name=str(entityInfo.szEntityLabel),
                                      t_stop=0. * pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times = pq.Quantity(times, units = 's', copy = False),
                                        t_stop = times.max(),
                                        waveforms = pq.Quantity(waveforms, units = str(pdwSegmentInfo.szUnits), copy = False ),
                                        left_sweep = nsample/2./float(pdwSegmentInfo.dSampleRate)*pq.s,
                                        sampling_rate = float(pdwSegmentInfo.dSampleRate)*pq.Hz,
                                        name = str(entityInfo.szEntityLabel),
                                        )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
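
The segment reader above dispatches on sys.platform to pick the right ctypes loader for the vendor library. A minimal sketch of that dispatch (like the code above, it leaves the commented-out darwin branch unimplemented):

import ctypes
import sys

def load_neuroshare_library(dllname):
    # load a vendor neuroshare library in a platform-dependent way
    if sys.platform.startswith('win'):
        return ctypes.windll.LoadLibrary(dllname)
    elif sys.platform.startswith('linux'):
        return ctypes.cdll.LoadLibrary(dllname)
    raise NotImplementedError('unsupported platform: %s' % sys.platform)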
Exemplo n.º 46
0
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        # ~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    # use 'f8' like the other timestamp arrays; 'f4' loses
                    # precision for long recordings
                    stop_times = stop_times.astype('f8') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=t_stop, name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                fragment_starts = fragment_starts.astype('f8') / global_header[
                    'freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=entity_header['offset'])
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers: TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    # the file was opened in binary mode, so strip NUL bytes
                    # before decoding the marker-type string
                    markertype = fid.read(64).replace(b'\x00', b'').decode()
                    labels = np.memmap(
                        self.filename, np.dtype(
                            'S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
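
For reference, a minimal usage sketch for a reader exposing this read_segment
signature. The annotations above (neuroexplorer_version, the .nex-style entity
headers) suggest this is neo's NeuroExplorerIO; the filename is a placeholder,
and the printed attributes assume the non-lazy path:

from neo.io import NeuroExplorerIO

reader = NeuroExplorerIO(filename='data.nex')   # placeholder path
seg = reader.read_segment(lazy=False, cascade=True)

# one Segment holding everything the file contained
print(len(seg.spiketrains), len(seg.events),
      len(seg.epochs), len(seg.analogsignals))
for sptr in seg.spiketrains:
    print(sptr.name, sptr.t_start, sptr.t_stop, sptr.size)
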