def test_compute_orientation_tuning():
    import numpy as np
    import quantities as pq
    from neo.core import SpikeTrain
    from exana.stimulus.tools import (make_orientation_trials,
                                      compute_orientation_tuning)

    trials = [
        SpikeTrain(np.arange(0, 10, 2) * pq.s,
                   t_stop=10 * pq.s,
                   orient=315. * pq.deg),
        SpikeTrain(np.arange(0, 10, 0.5) * pq.s,
                   t_stop=10 * pq.s,
                   orient=np.pi / 3 * pq.rad),
        SpikeTrain(np.arange(0, 10, 1) * pq.s,
                   t_stop=10 * pq.s,
                   orient=0 * pq.deg),
        SpikeTrain(np.arange(0, 10, 0.3) * pq.s,
                   t_stop=10 * pq.s,
                   orient=np.pi / 3 * pq.rad)
    ]
    sorted_orients = np.array(
        [0, (np.pi / 3 * pq.rad).rescale(pq.deg) / pq.deg, 315]) * pq.deg
    rates_e = np.array([1., 2.7, 0.5]) / pq.s

    trials = make_orientation_trials(trials)
    rates, orients = compute_orientation_tuning(trials)
    assert ((rates == rates_e).all())
    assert (rates.units == rates_e.units)
    assert ((orients == sorted_orients).all())
    assert (orients.units == sorted_orients.units)
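The expected orientations above rely on quantities' angle handling: the annotation given in radians is rescaled to degrees before sorting. A minimal standalone sketch of that conversion, using only quantities:

import numpy as np
import quantities as pq

angle = np.pi / 3 * pq.rad                # orientation annotated in radians
print(angle.rescale(pq.deg))              # ~60.0 deg
print((315. * pq.deg).rescale(pq.rad))    # ~5.50 rad
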
Example #2
def test_spike_argmax_zeros():
    train1 = SpikeTrain([] * s, t_stop=10)
    train2 = SpikeTrain([] * s, t_stop=10)
    trains = np.array([train1, train2])
    out = v.spike_argmax(trains)
    assert out.size == len(trains)
    assert out.sum() == 1
Example #3
 def read_segment(
     self,
     lazy=False,
     delimiter="\t",
     t_start=0. * pq.s,
     unit=pq.s,
 ):
     """
     delimiter is the columns delimiter in file  "\t" or one space or two space or "," or ";"
     t_start is the time start of all spiketrain 0 by default. unit is the unit of spike times,
     can be a str or directly a quantity.
     """
     assert not lazy, "Do not support lazy"
     unit = pq.Quantity(1, unit)
     seg = Segment(file_origin=os.path.basename(self.filename))
     f = open(self.filename, "Ur")
     for i, line in enumerate(f):
         alldata = line[:-1].split(delimiter)
         if alldata[-1] == "":
             alldata = alldata[:-1]
         if alldata[0] == "":
             alldata = alldata[1:]
         spike_times = np.array(alldata).astype("f")
         t_stop = spike_times.max() * unit
         sptr = SpikeTrain(spike_times * unit,
                           t_start=t_start,
                           t_stop=t_stop)
         sptr.annotate(channel_index=i)
         seg.spiketrains.append(sptr)
     f.close()
     seg.create_many_to_one_relationship()
     return seg
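A minimal usage sketch for a read_segment like the one above, assuming a hypothetical ASCII spike-train IO class that exposes it and a file with one line of delimited spike times per unit (the class name and file name are illustrative):

import quantities as pq

io = AsciiSpikeTrainIO(filename="spikes.txt")   # hypothetical IO class wrapping read_segment
seg = io.read_segment(delimiter="\t", t_start=0. * pq.s, unit=pq.ms)
for st in seg.spiketrains:
    print(st.annotations["channel_index"], len(st), st.t_stop)
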
Example #4
def test_make_orientation_trials():
    import numpy as np
    import quantities as pq
    from neo.core import SpikeTrain
    from exana.stimulus.tools import (make_orientation_trials,
                                      _convert_string_to_quantity_scalar)

    trials = [
        SpikeTrain(np.arange(0, 10, 2) * pq.s,
                   t_stop=10 * pq.s,
                   orient=315. * pq.deg),
        SpikeTrain(np.arange(0, 10, 0.5) * pq.s,
                   t_stop=10 * pq.s,
                   orient=np.pi / 3 * pq.rad),
        SpikeTrain(np.arange(0, 10, 1) * pq.s,
                   t_stop=10 * pq.s,
                   orient=0 * pq.deg),
        SpikeTrain(np.arange(0, 10, 0.3) * pq.s,
                   t_stop=10 * pq.s,
                   orient=np.pi / 3 * pq.rad)
    ]

    sorted_trials = [[trials[2]], [trials[1], trials[3]], [trials[0]]]
    sorted_orients = [
        0 * pq.deg, (np.pi / 3 * pq.rad).rescale(pq.deg), 315 * pq.deg
    ]
    orient_trials = make_orientation_trials(trials, unit=pq.deg)

    for (key, value), trial, orient in zip(orient_trials.items(),
                                           sorted_trials, sorted_orients):
        key = _convert_string_to_quantity_scalar(key)
        assert (key == orient.magnitude)
        for t, st in zip(value, trial):
            assert ((t == st).all())
            assert (t.t_start == st.t_start)
            assert (t.t_stop == st.t_stop)
            assert (t.annotations["orient"] == orient)
Example #5
def test_spike_argmax_indexed():
    train1 = SpikeTrain([3, 4] * s, t_stop=10)
    train2 = SpikeTrain([5, 6] * s, t_stop=10)
    trains = np.array([train1, train2])
    out = v.spike_argmax(trains)
    expected = np.array([1, 0])
    assert np.array_equal(out, expected)
Example #6
def proc_src_condition_unit_repetition(sweep, damaIndex, timeStamp, sweepLen,
                                       side, ADperiod, respWin, filename):
    '''Get the repetition for a unit in a condition in a src file that has been
    processed by the official matlab function.  See proc_src for details'''
    damaIndex = damaIndex.astype('int32')
    if len(sweep):
        times = np.array([res[0, 0] for res in sweep['time']])
        shapes = np.concatenate([res.flatten()[np.newaxis][np.newaxis] for res
                                 in sweep['shape']], axis=0)
        trig2 = np.array([res[0, 0] for res in sweep['trig2']])
    else:
        times = np.array([])
        shapes = np.array([[[]]])
        trig2 = np.array([])

    times = pq.Quantity(times, units=pq.ms, dtype=np.float32)
    t_start = pq.Quantity(0, units=pq.ms, dtype=np.float32)
    t_stop = pq.Quantity(sweepLen, units=pq.ms, dtype=np.float32)
    trig2 = pq.Quantity(trig2, units=pq.ms, dtype=np.uint8)
    waveforms = pq.Quantity(shapes, dtype=np.int8, units=pq.mV)
    sampling_period = pq.Quantity(ADperiod, units=pq.us)

    train = SpikeTrain(times=times, t_start=t_start, t_stop=t_stop,
                       trig2=trig2, dtype=np.float32, timestamp=timeStamp,
                       dama_index=damaIndex, side=side, copy=True,
                       respwin=respWin, waveforms=waveforms,
                       file_origin=filename)
    train.annotations['side'] = side
    train.sampling_period = sampling_period
    return train
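The extra keyword arguments passed to SpikeTrain above (trig2, timestamp, dama_index, respwin, ...) are not constructor parameters; neo stores unknown keyword arguments as annotations. A minimal sketch of that behaviour:

import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([1.0, 2.5, 7.0] * pq.ms, t_start=0 * pq.ms, t_stop=10 * pq.ms,
                side='left', dama_index=3)        # unknown kwargs become annotations
print(st.annotations['side'], st.annotations['dama_index'])
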
Example #7
    def test__construct_subsegment_by_unit(self):
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = [0, 2, 5]
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        #recordingchannelgroups
        rcgs = [ RecordingChannelGroup(name = 'Vm', channel_indexes = unit_with_sig),
                        RecordingChannelGroup(name = 'Conductance', channel_indexes = unit_with_sig), ]

        # Unit
        all_unit = [ ]
        for u in range(nb_unit):
            un = Unit(name = 'Unit #%d' % u, channel_indexes = [u])
            all_unit.append(un)

        bl = Block()
        for s in range(nb_seg):
            seg = Segment(name = 'Simulation %s' % s)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2, 3], units = 'ms', t_start = 0., t_stop = 10)
                st.unit = all_unit[j]

            for t in signal_types:
                anasigarr = AnalogSignalArray( np.zeros((sig_len, len(unit_with_sig)) ), units = 'nA',
                                sampling_rate = 1000.*pq.Hz, channel_indexes = unit_with_sig )
                seg.analogsignalarrays.append(anasigarr)

        # what you want
        subseg = seg.construct_subsegment_by_unit(all_unit[:4])
Example #9
 def test_annotations(self):
     self.testfilename = self.get_filename_path('nixio_fr_ann.nix')
     with NixIO(filename=self.testfilename, mode='ow') as io:
         annotations = {'my_custom_annotation': 'hello block'}
         bl = Block(**annotations)
         annotations = {'something': 'hello hello000'}
         seg = Segment(**annotations)
          an = AnalogSignal([[1, 2, 3], [4, 5, 6]], units='V',
                            sampling_rate=1 * pq.Hz)
          an.annotations['ansigrandom'] = 'hello chars'
          sp = SpikeTrain([3, 4, 5] * s, t_stop=10.0)
         sp.annotations['railway'] = 'hello train'
         ev = Event(np.arange(0, 30, 10)*pq.Hz,
                    labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
         ev.annotations['venue'] = 'hello event'
         ev2 = Event(np.arange(0, 30, 10) * pq.Hz,
                    labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
         ev2.annotations['evven'] = 'hello ev'
         seg.spiketrains.append(sp)
         seg.events.append(ev)
         seg.events.append(ev2)
         seg.analogsignals.append(an)
         bl.segments.append(seg)
         io.write_block(bl)
         io.close()
     with NixIOfr(filename=self.testfilename) as frio:
         frbl = frio.read_block()
         assert 'my_custom_annotation' in frbl.annotations
         assert 'something' in frbl.segments[0].annotations
         # assert 'ansigrandom' in frbl.segments[0].analogsignals[0].annotations
         assert 'railway' in frbl.segments[0].spiketrains[0].annotations
         assert 'venue' in frbl.segments[0].events[0].annotations
         assert 'evven' in frbl.segments[0].events[1].annotations
     os.remove(self.testfilename)
Example #10
    def read_spiketrain(fh, block_id, array_id):
        nix_block = fh.handle.blocks[block_id]
        nix_da = nix_block.data_arrays[array_id]

        params = {
            'times': nix_da[:],  # TODO think about lazy data loading
            'dtype': nix_da.dtype,
            't_start': Reader.Help.read_quantity(nix_da.metadata, 't_start'),
            't_stop': Reader.Help.read_quantity(nix_da.metadata, 't_stop')
        }

        name = Reader.Help.get_obj_neo_name(nix_da)
        if name:
            params['name'] = name

        if 'left_sweep' in nix_da.metadata:
            params['left_sweep'] = Reader.Help.read_quantity(nix_da.metadata, 'left_sweep')

        if len(nix_da.dimensions) > 0:
            s_dim = nix_da.dimensions[0]
            params['sampling_rate'] = s_dim.sampling_interval * getattr(pq, s_dim.unit)

        if nix_da.unit:
            params['units'] = nix_da.unit

        st = SpikeTrain(**params)

        for key, value in Reader.Help.read_attributes(nix_da.metadata, 'spiketrain').items():
            setattr(st, key, value)

        st.annotations = Reader.Help.read_annotations(nix_da.metadata, 'spiketrain')

        return st
Example #11
 def _handle_processing_group(self, block):
     # todo: handle other modules than Units
     units_group = self._file.get('processing/Units/UnitTimes')
     segment_map = dict(
         (segment.name, segment) for segment in block.segments)
     for name, group in units_group.items():
         if name == 'unit_list':
             pass  # todo
         else:
             segment_name = group['source'].value
             #desc = group['unit_description'].value  # use this to store Neo Unit id?
             segment = segment_map[segment_name]
             if self._lazy:
                 times = np.array(())
                 lazy_shape = group['times'].shape
             else:
                 times = group['times'].value
             spiketrain = SpikeTrain(
                 times,
                 units=pq.second,
                 t_stop=group['t_stop'].value * pq.second
             )  # todo: this is a custom Neo value, general NWB files will not have this - use segment.t_stop instead in that case?
             if self._lazy:
                 spiketrain.lazy_shape = lazy_shape
             spiketrain.segment = segment
             segment.spiketrains.append(spiketrain)
Example #12
def test_spike_argmax_randomised():
    train1 = SpikeTrain([3, 4] * s, t_stop=10)
    train2 = SpikeTrain([5, 6] * s, t_stop=10)
    trains = np.array([train1, train2])
    out = v.spike_argmax_randomise(trains)
    assert out.size == len(trains)
    assert out.sum() == 1
Example #13
def test_spike_argmax_regular():
    train1 = SpikeTrain([2, 20] * s, t_stop=100)
    train2 = SpikeTrain([31, 40, 61] * s, t_stop=100)
    train3 = SpikeTrain([1, 4] * s, t_stop=100)
    train4 = SpikeTrain([1, 47] * s, t_stop=100)
    trains = np.array([train1, train2, train3, train4])
    out = v.spike_argmax(trains)
    assert np.array_equal(out, np.array([0, 1, 0, 0]))
Example #14
 def _read_spiketrain(self, node, parent):
     attributes = self._get_standard_attributes(node)
     t_start = self._get_quantity(node["t_start"])
     t_stop = self._get_quantity(node["t_stop"])
     # todo: handle sampling_rate, waveforms, left_sweep
     spiketrain = SpikeTrain(self._get_quantity(node["times"]),
                             t_start=t_start, t_stop=t_stop,
                             **attributes)
     spiketrain.segment = parent
     self.object_refs[node.attrs["object_ref"]] = spiketrain
     return spiketrain
Example #15
def spiketrain_from_raw(channel: AnalogSignal, threshold: Quantity) -> SpikeTrain:
    assert channel.signal.shape[1] == 1
    spikes = find_spikes(channel, threshold)
    signal = channel.signal.squeeze()
    times = np.array([channel_index_to_time(channel, start) for start, _ in spikes]) * channel.sampling_period.units
    waveforms = np.array([[signal[start:stop]] for start, stop in spikes]) * channel.units
    name = f"{channel.name}#{threshold}"
    result = SpikeTrain(name=name, times=times, units=channel.units, t_start=channel.t_start, t_stop=channel.t_stop,
                        waveforms=waveforms, sampling_rate=channel.sampling_rate, sort=True)
    result.annotate(from_channel=channel.name, threshold=threshold)
    return result
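neo expects SpikeTrain.waveforms to be 3-D with shape (n_spikes, n_channels, n_samples), which is why each extracted snippet above is wrapped in an extra list to add the single-channel axis. A minimal sketch with made-up numbers:

import numpy as np
import quantities as pq
from neo.core import SpikeTrain

snippets = np.random.randn(4, 30)               # 4 spikes, 30 samples each
waveforms = snippets[:, np.newaxis, :] * pq.uV  # add the single-channel axis
st = SpikeTrain([1, 5, 9, 12] * pq.ms, t_stop=20 * pq.ms, waveforms=waveforms)
print(st.waveforms.shape)                       # (4, 1, 30)
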
Example #17
 def _extract_spikes(self, data, metadata, channel_index):
     spiketrain = None
     spike_times = self._extract_array(data, channel_index)
     if len(spike_times) > 0:
         spiketrain = SpikeTrain(spike_times,
                                 units=pq.ms,
                                 t_stop=spike_times.max())
         spiketrain.annotate(label=metadata["label"],
                             channel_index=channel_index,
                             dt=metadata["dt"])
         return spiketrain
Example #18
 def _extract_spikes(self, data, metadata, channel_index, lazy):
     spiketrain = None
     if lazy:
         if channel_index in data[:, 1]:
             spiketrain = SpikeTrain([], units=pq.ms, t_stop=0.0)
             spiketrain.lazy_shape = None
     else:
         spike_times = self._extract_array(data, channel_index)
         if len(spike_times) > 0:
             spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())
     if spiketrain is not None:
         spiketrain.annotate(label=metadata["label"],
                             channel_index=channel_index,
                             dt=metadata["dt"])
         return spiketrain
Example #19
 def _align_to_zero(self, spiketrains=None):
     if spiketrains is None:
         spiketrains = self.spiketrains
     t_lims = [(st.t_start, st.t_stop) for st in spiketrains]
     tmin = min(t_lims, key=lambda f: f[0])[0]
     tmax = max(t_lims, key=lambda f: f[1])[1]
     unit = spiketrains[0].units
     for count, spiketrain in enumerate(spiketrains):
         annotations = spiketrain.annotations
         spiketrains[count] = SpikeTrain(
             np.array(spiketrain.tolist()) * unit - tmin,
             t_start=0 * unit,
             t_stop=tmax - tmin)
         spiketrains[count].annotations = annotations
     return spiketrains
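A minimal sketch of the same zero-alignment idea applied to a single train, using quantities arithmetic on the spike times:

import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([12, 14, 19] * pq.s, t_start=10 * pq.s, t_stop=20 * pq.s)
shifted = SpikeTrain(st.times - st.t_start,
                     t_start=0 * pq.s, t_stop=st.t_stop - st.t_start)
print(shifted.times)   # [2. 4. 9.] s
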
Example #20
    def read_spiketrain(self,
                        # the 2 first key arguments are imposed by neo.io API
                        lazy=False,
                        cascade=True,
                        segment_duration=15.,
                        t_start=-1,
                        channel_index=0,
                        ):
        """
        With this IO a SpikeTrain can be accessed directly by its channel number.
        """
        # There are 2 possible behaviours for a SpikeTrain:
        # holding many Spike instances or directly holding spike times.
        # We choose here the first:
        if not HAVE_SCIPY:
            raise SCIPY_ERR

        num_spike_by_spiketrain = 40
        sr = 10000.

        if lazy:
            times = [ ]
        else:
            times = (np.random.rand(num_spike_by_spiketrain)*segment_duration +
                     t_start)

        # create a spiketrain
        spiketr = SpikeTrain(times, t_start = t_start*pq.s, t_stop = (t_start+segment_duration)*pq.s ,
                                            units = pq.s,
                                            name = 'it is a spiketrain from exampleio',
                                            )

        if lazy:
            # we add the attribute lazy_shape with the size if loaded
            spiketr.lazy_shape = (num_spike_by_spiketrain,)

        # our spiketrains also hold the waveforms:

        # 1 generate a fake spike shape (2d array if trodness >1)
        w1 = -stats.nct.pdf(np.arange(11,60,4), 5,20)[::-1]/3.
        w2 = stats.nct.pdf(np.arange(11,60,2), 5,20)
        w = np.r_[ w1 , w2 ]
        w = -w/max(w)

        if not lazy:
            # in the neo API the waveforms attr is 3D to handle tetrodes;
            # in our case it is a mono electrode, so dim 1 has size 1
            waveforms  = np.tile( w[np.newaxis,np.newaxis,:], ( num_spike_by_spiketrain ,1, 1) )
            waveforms *=  np.random.randn(*waveforms.shape)/6+1
            spiketr.waveforms = waveforms*pq.mV
            spiketr.sampling_rate = sr * pq.Hz
            spiketr.left_sweep = 1.5* pq.s

        # for attributes out of neo you can annotate
        spiketr.annotate(channel_index = channel_index)

        return spiketr
Example #21
def output_fire_freq(spiketime_dict,isi_binsize,isibins,max_time):
    import numpy as np
    import elephant
    from neo.core import AnalogSignal, SpikeTrain
    import quantities as q
    ratebins=np.arange(0,np.ceil(max_time),isi_binsize)
    #spike_rate: across entire time
    spike_rate={key:np.zeros((len(spike_set),len(ratebins))) for key,spike_set in spiketime_dict.items()}
    spike_rate_vs_time_mean={};spike_rate_vs_time_std={}
    #spike_freq: segmented into pre,stim,post
    spike_freq={key:{} for key in spiketime_dict.keys()}
    spike_freq_mean={key:{} for key in spiketime_dict.keys()}
    spike_freq_std={key:{} for key in spiketime_dict.keys()}
    for key,spike_set in spiketime_dict.items(): #iterate over different stimulation conditions
        for i in range(len(spike_set)): #iterate over trials
            train=SpikeTrain(spike_set[i]*q.s,t_stop=np.ceil(max_time)*q.s)
            spike_rate[key][i]=elephant.statistics.instantaneous_rate(train,isi_binsize*q.s).magnitude[:,0]#/len(spike_set)
    for key,rate_set in spike_rate.items(): #separate out the spike_rate into pre, post, and stimulation time frames
        for pre_post,binlist in isibins.items():
            binmin_idx=np.abs(ratebins-binlist[0]).argmin()
            binmax_idx=np.abs(ratebins-(binlist[-1]+isi_binsize)).argmin()
            spike_freq[key][pre_post]=spike_rate[key][:,binmin_idx:binmax_idx]
        spike_rate_vs_time_mean[key]=np.mean(spike_rate[key],axis=0) #average across trials
        spike_rate_vs_time_std[key]=np.std(spike_rate[key],axis=0) #std across trials
        for pre_post,binlist in isibins.items():
            spike_freq_mean[key][pre_post]=np.mean(spike_freq[key][pre_post],axis=0)
            spike_freq_std[key][pre_post]=np.std(spike_freq[key][pre_post],axis=0)
    return spike_freq_mean,spike_freq_std,spike_rate_vs_time_mean,spike_rate_vs_time_std,ratebins
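A minimal sketch of the elephant call used above, here with an explicit Gaussian kernel (the kernel choice and numbers are illustrative); the result is an AnalogSignal whose samples are firing rates:

import numpy as np
import quantities as pq
from neo.core import SpikeTrain
from elephant.statistics import instantaneous_rate
from elephant.kernels import GaussianKernel

train = SpikeTrain(np.sort(np.random.uniform(0, 5, 40)) * pq.s, t_stop=5 * pq.s)
rate = instantaneous_rate(train, sampling_period=0.5 * pq.s,
                          kernel=GaussianKernel(sigma=0.3 * pq.s))
print(rate.shape, rate.units)   # roughly (10, 1), rate expressed in Hz
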
Example #22
 def __init__(self,
              clusterID,
              group,
              spikeTimes,
              waveForm=None,
              shank=None,
              maxChannel=None,
              coordinates=None,
              firingRate=None):
     super(Unit, self).__init__(name=str(clusterID))
     self.clusterID = clusterID
     self.group = group
     if hasattr(spikeTimes, 'times'):
         self.spiketrains.append(spikeTimes)
     else:
         newSpiketrain = SpikeTrain(spikeTimes,
                                    units='sec',
                                    t_stop=max(spikeTimes))
         self.spiketrains.append(newSpiketrain)
     self.waveForm = waveForm
     self.shank = shank
     self.maxChannel = maxChannel
     self.coordinates = coordinates
     self.firingRate = firingRate
     self.mergedDuplicates = False
     self.nrMergedDuplicateSpikes = 0
Example #23
 def remove_short_ISIs(self, clusterID, ISIThreshold):
     '''
     Removes second spike in cases where ISI is less than a given threshold.
     WARNING: Modifies cluster in-place.
     :param clusterID: ID of cluster to be modified
     :param ISIThreshold: spikes with ISIs less than threshold will be removed
     :return: N/A
     '''
     # TODO: if we have an ISI below threshold, do we have to remove BOTH spikes?
     # if duplicate spike times due to cluster misalignment, then no...
     # if contamination then yes (we do not know which spike is `correct`)
     spikeTimes = self.clusters[clusterID].spiketrains[0].times.magnitude
     dt = np.zeros(len(spikeTimes))
     dt[:-1] = np.diff(spikeTimes)
     dt[-1] = 1e6
     duplicateSpikeDelays = spikeTimes[np.where(np.abs(dt) <= ISIThreshold)]
     keepSpikeTimes = spikeTimes[np.where(np.abs(dt) > ISIThreshold)]
     keepAmplitudes = self.clusters[clusterID].templateAmplitudes[np.where(
         np.abs(dt) > ISIThreshold)]
     spikeTrain = SpikeTrain(keepSpikeTimes,
                             units='sec',
                             t_stop=max(spikeTimes),
                             t_start=min(0, min(spikeTimes)))
     self.clusters[clusterID].spiketrains[0] = spikeTrain
     self.clusters[clusterID].templateAmplitudes = keepAmplitudes
     self.clusters[clusterID].mergedDuplicates = True
     self.clusters[clusterID].nrMergedDuplicateSpikes = len(
         duplicateSpikeDelays)
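A standalone numpy sketch of the ISI-threshold filtering used above: a spike is kept only when the gap to the following spike exceeds the threshold (numbers are illustrative):

import numpy as np

spike_times = np.array([0.010, 0.0105, 0.050, 0.120, 0.1204])  # seconds
isi_threshold = 0.001                                           # 1 ms

dt = np.diff(spike_times, append=np.inf)   # ISI to the next spike
keep = np.abs(dt) > isi_threshold
print(spike_times[keep])                   # [0.0105 0.05   0.1204]
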
Example #24
    def __save_segment(self):
        '''
        Write the segment to the Block if it exists
        '''
        # if this is the beginning of the first condition, then we don't want
        # to save, so exit
        # but set __seg from None to False so we know next time to create a
        # segment even if there are no spikes in the condition
        if self.__seg is None:
            self.__seg = False
            return

        if not self.__seg:
            # create dummy values if there are no SpikeTrains in this condition
            self.__seg = Segment(file_origin=self._filename, **self.__params)
            self.__spiketimes = []

        times = pq.Quantity(self.__spiketimes, dtype=np.float32, units=pq.ms)
        train = SpikeTrain(times,
                           t_start=0 * pq.ms,
                           t_stop=self.__t_stop * pq.ms,
                           file_origin=self._filename)

        self.__seg.spiketrains = [train]
        self.__unit.spiketrains.append(train)
        self._blk.segments.append(self.__seg)

        # set an empty segment
        # from now on, we need to set __seg to False rather than None so
        # that if there is a condition with no SpikeTrains we know
        # to create an empty Segment
        self.__seg = False
Example #25
 def load(self, time_slice=None, strict_slicing=True):
     """
     Load SpikeTrainProxy args:
         :param time_slice: None or tuple of the time slice expressed with quantities.
                         None is the entire spike train.
         :param strict_slicing: True by default.
             Control if an error is raised or not when one of the time_slice members
             (t_start or t_stop) is outside the real time range of the segment.
     """
     interval = None
     if time_slice:
         interval = (float(t)
                     for t in time_slice)  # convert from quantities
     spike_times = self._units_table.get_unit_spike_times(
         self.id, in_interval=interval)
     return SpikeTrain(
         spike_times * self.units,
         self.t_stop,
         units=self.units,
         # sampling_rate=array(1.) * Hz,
         t_start=self.t_start,
         # waveforms=None,
         # left_sweep=None,
         name=self.name,
         # file_origin=None,
         # description=None,
         # array_annotations=None,
         **self.annotations)
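A minimal usage sketch, assuming proxy is an instance of a class that defines the load method above, as returned by a lazy reader:

import quantities as pq

st_full = proxy.load()                                   # the whole spike train
st_slice = proxy.load(time_slice=(1 * pq.s, 3 * pq.s))   # only spikes between 1 s and 3 s
print(len(st_full), len(st_slice))
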
Example #26
def train_to_neo(train, framerate=24):
  ''' convert a single spike train to Neo SpikeTrain '''
  times = np.nonzero(train)[0]
  times = times / framerate * pq.s
  t_stop = train.shape[-1] / framerate * pq.s
  spike_train = SpikeTrain(times=times, units=pq.s, t_stop=t_stop)
  return spike_train
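A minimal usage sketch for train_to_neo, assuming a binary raster sampled at 24 frames per second:

import numpy as np

raster = np.zeros(240, dtype=int)
raster[[12, 48, 120]] = 1             # spikes at frames 12, 48 and 120
st = train_to_neo(raster, framerate=24)
print(st.times)                       # [0.5 2.  5. ] s
print(st.t_stop)                      # 10.0 s
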
Example #27
    def test__construct_subsegment_by_unit(self):
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        # channelindexes
        chxs = [ChannelIndex(name='Vm',
                             index=unit_with_sig),
                ChannelIndex(name='Conductance',
                             index=unit_with_sig)]

        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)

        blk = Block()
        blk.channel_indexes = chxs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]

            for t in signal_types:
                anasigarr = AnalogSignal(np.zeros((sig_len,
                                                        len(unit_with_sig))),
                                              units='nA',
                                              sampling_rate=1000.*pq.Hz,
                                              channel_indexes=unit_with_sig)
                seg.analogsignals.append(anasigarr)

        blk.create_many_to_one_relationship()
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for chx in chxs:
            assert_neo_object_is_compliant(chx)
        assert_neo_object_is_compliant(blk)

        # what you want
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
Example #28
    def test__construct_subsegment_by_unit(self):
        nb_seg = 3
        nb_unit = 7
        unit_with_sig = np.array([0, 2, 5])
        signal_types = ['Vm', 'Conductances']
        sig_len = 100

        # channelindexes
        chxs = [ChannelIndex(name='Vm',
                             index=unit_with_sig),
                ChannelIndex(name='Conductance',
                             index=unit_with_sig)]

        # Unit
        all_unit = []
        for u in range(nb_unit):
            un = Unit(name='Unit #%d' % u, channel_indexes=np.array([u]))
            assert_neo_object_is_compliant(un)
            all_unit.append(un)

        blk = Block()
        blk.channel_indexes = chxs
        for s in range(nb_seg):
            seg = Segment(name='Simulation %s' % s)
            for j in range(nb_unit):
                st = SpikeTrain([1, 2], units='ms',
                                t_start=0., t_stop=10)
                st.unit = all_unit[j]

            for t in signal_types:
                anasigarr = AnalogSignal(np.zeros((sig_len,
                                                   len(unit_with_sig))),
                                         units='nA',
                                         sampling_rate=1000. * pq.Hz,
                                         channel_indexes=unit_with_sig)
                seg.analogsignals.append(anasigarr)

        blk.create_many_to_one_relationship()
        for unit in all_unit:
            assert_neo_object_is_compliant(unit)
        for chx in chxs:
            assert_neo_object_is_compliant(chx)
        assert_neo_object_is_compliant(blk)

        # what you want
        newseg = seg.construct_subsegment_by_unit(all_unit[:4])
        assert_neo_object_is_compliant(newseg)
Example #29
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))
        cls.populate_dates(blk)

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        cls.populate_dates(seg)
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times,
                                        signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #30
    def test_multiref_write(self):
        blk = Block("blk1")
        signal = AnalogSignal(name="sig1",
                              signal=[0, 1, 2],
                              units="mV",
                              sampling_period=pq.Quantity(1, "ms"))
        othersignal = IrregularlySampledSignal(name="i1",
                                               signal=[0, 0, 0],
                                               units="mV",
                                               times=[1, 2, 3],
                                               time_units="ms")
        event = Event(name="Evee", times=[0.3, 0.42], units="year")
        epoch = Epoch(name="epoche",
                      times=[0.1, 0.2] * pq.min,
                      durations=[0.5, 0.5] * pq.min)
        st = SpikeTrain(name="the train of spikes",
                        times=[0.1, 0.2, 10.3],
                        t_stop=11,
                        units="us")

        for idx in range(3):
            segname = "seg" + str(idx)
            seg = Segment(segname)
            blk.segments.append(seg)
            seg.analogsignals.append(signal)
            seg.irregularlysampledsignals.append(othersignal)
            seg.events.append(event)
            seg.epochs.append(epoch)
            seg.spiketrains.append(st)

        chidx = ChannelIndex([10, 20, 29])
        seg = blk.segments[0]
        st = SpikeTrain(name="choochoo",
                        times=[10, 11, 80],
                        t_stop=1000,
                        units="s")
        seg.spiketrains.append(st)
        blk.channel_indexes.append(chidx)
        for idx in range(6):
            unit = Unit("unit" + str(idx))
            chidx.units.append(unit)
            unit.spiketrains.append(st)

        self.writer.write_block(blk)
        self.compare_blocks([blk], self.reader.blocks)
Example #31
 def test_read_spiketrain_using_eager(self):
     io = self.io_cls(self.test_file)
     st3 = io.read_spiketrain(lazy=False, channel_index=3)
     self.assertIsInstance(st3, SpikeTrain)
     assert_arrays_equal(st3,
                         SpikeTrain(np.arange(3, 104, dtype=float),
                                    t_start=0*pq.s,
                                    t_stop=104*pq.s,
                                    units=pq.ms))
Example #32
    def read_segment(
        self,
        lazy=False,
        cascade=True,
        delimiter='\t',
        t_start=0. * pq.s,
        unit=pq.s,
    ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i, line in enumerate(f):
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '': alldata = alldata[:-1]
            if alldata[0] == '': alldata = alldata[1:]
            if lazy:
                spike_times = []
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max() * unit

            sptr = SpikeTrain(spike_times * unit,
                              t_start=t_start,
                              t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index=i)
            seg.spiketrains.append(sptr)
        f.close()

        seg.create_many_to_one_relationship()
        return seg
Example #34
    def create_all_annotated(cls):
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))

        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)

        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)

        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)

        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)

        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)

        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)

        chx = ChannelIndex(name="achx", index=[1, 2], channel_ids=[0, 10])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)

        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)

        return blk
Example #35
def find_peak(seg, varname):
    for i, sig in enumerate(seg.analogsignals):
        if sig.name in varname:
            print("finding peaks for "+sig.name)
            p = ssp.find_peaks_cwt(sig.flatten(), np.arange(100, 550),
                                   min_length=10)
            peaks = p / sig.sampling_rate + sig.t_start
            peaks = SpikeTrain(times=peaks, t_start=sig.t_start,
                               t_stop=sig.t_stop, name=sig.name)
            seg.spiketrains.append(peaks)
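The peak positions returned by find_peaks_cwt are sample indices; the expression above converts them to times with quantities. A minimal sketch of that conversion alone (numbers are illustrative):

import numpy as np
import quantities as pq

sampling_rate = 10 * pq.kHz
t_start = 0.5 * pq.s
peak_indices = np.array([100, 2500, 60000])
peak_times = peak_indices / sampling_rate + t_start
print(peak_times.rescale(pq.s))   # [0.51 0.75 6.5 ] s
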
Example #36
 def test_read_segment_containing_spiketrains_using_eager_cascade(self):
     io = self.io_cls(self.test_file)
     segment = io.read_segment(lazy=False, cascade=True)
     self.assertIsInstance(segment, Segment)
     self.assertEqual(len(segment.spiketrains), NCELLS)
     st0 = segment.spiketrains[0]
     self.assertIsInstance(st0, SpikeTrain)
     assert_arrays_equal(st0,
                         SpikeTrain(np.arange(0, 101, dtype=float),
                                    t_start=0*pq.s,
                                    t_stop=101*pq.ms,
                                    units=pq.ms))
     st4 = segment.spiketrains[4]
     self.assertIsInstance(st4, SpikeTrain)
     assert_arrays_equal(st4,
                         SpikeTrain(np.arange(4, 105, dtype=float),
                                    t_start=0*pq.s,
                                    t_stop=105*pq.ms,
                                    units=pq.ms))
Example #37
    def read_segment(self,
                            lazy = False,
                            cascade = True,
                            delimiter = '\t',
                            t_start = 0.*pq.s,
                            unit = pq.s,
                            ):
        """
        Arguments:
            delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin = os.path.basename(self.filename))
        if not cascade:
            return seg

        f = open(self.filename, 'Ur')
        for i,line in enumerate(f) :
            alldata = line[:-1].split(delimiter)
            if alldata[-1] == '': alldata = alldata[:-1]
            if alldata[0] == '': alldata = alldata[1:]
            if lazy:
                spike_times = [ ]
                t_stop = t_start
            else:
                spike_times = np.array(alldata).astype('f')
                t_stop = spike_times.max()*unit

            sptr = SpikeTrain(spike_times*unit, t_start=t_start, t_stop=t_stop)
            if lazy:
                sptr.lazy_shape = len(alldata)

            sptr.annotate(channel_index = i)
            seg.spiketrains.append(sptr)
        f.close()

        seg.create_many_to_one_relationship()
        return seg
Example #38
    def test_spiketrain_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)

        spiketrain = SpikeTrain(times=[3, 4, 5]*pq.s, t_stop=10.0,
                                name="spikes!", description="sssssspikes")
        seg.spiketrains.append(spiketrain)
        self.write_and_compare([block])

        waveforms = self.rquant((3, 5, 10), pq.mV)
        spiketrain = SpikeTrain(times=[1, 1.1, 1.2]*pq.ms, t_stop=1.5*pq.s,
                                name="spikes with wf",
                                description="spikes for waveform test",
                                waveforms=waveforms)

        seg.spiketrains.append(spiketrain)
        self.write_and_compare([block])

        spiketrain.left_sweep = np.random.random(10)*pq.ms
        self.write_and_compare([block])
Example #39
    def __save_segment(self):
        '''
        Write the segment to the Block if it exists
        '''
        # if this is the beginning of the first condition, then we don't want
        # to save, so exit
        # but set __seg from None to False so we know next time to create a
        # segment even if there are no spikes in the condition
        if self.__seg is None:
            self.__seg = False
            return

        if not self.__seg:
            # create dummy values if there are no SpikeTrains in this condition
            self.__seg = Segment(file_origin=self._filename,
                                 **self.__params)
            self.__spiketimes = []

        if self.__lazy:
            train = SpikeTrain(pq.Quantity([], dtype=np.float32,
                                           units=pq.ms),
                               t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
                               file_origin=self._filename)
            train.lazy_shape = len(self.__spiketimes)
        else:
            times = pq.Quantity(self.__spiketimes, dtype=np.float32,
                                units=pq.ms)
            train = SpikeTrain(times,
                               t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
                               file_origin=self._filename)

        self.__seg.spiketrains = [train]
        self.__unit.spiketrains.append(train)
        self._blk.segments.append(self.__seg)

        # set an empty segment
        # from now on, we need to set __seg to False rather than None so
        # that if there is a condition with no SpikeTrains we know
        # to create an empty Segment
        self.__seg = False
Example #40
 def _handle_processing_group(self, block):
     # todo: handle other modules than Units
     units_group = self._file.get('processing/Units/UnitTimes')
     segment_map = dict((segment.name, segment) for segment in block.segments)
     for name, group in units_group.items():
         if name == 'unit_list':
             pass  # todo
         else:
             segment_name = group['source'].value
             #desc = group['unit_description'].value  # use this to store Neo Unit id?
             segment = segment_map[segment_name]
             if self._lazy:
                 times = np.array(())
                 lazy_shape = group['times'].shape
             else:
                 times = group['times'].value
             spiketrain = SpikeTrain(times, units=pq.second,
                                     t_stop=group['t_stop'].value*pq.second)  # todo: this is a custom Neo value, general NWB files will not have this - use segment.t_stop instead in that case?
             if self._lazy:
                 spiketrain.lazy_shape = lazy_shape
             spiketrain.segment = segment
             segment.spiketrains.append(spiketrain)
Example #41
    def read_segment(self, import_neuroshare_segment = True,
                     lazy=False, cascade=True):
        """
        Arguments:
            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.

        """
        seg = Segment( file_origin = os.path.basename(self.filename), )
        
        if sys.platform.startswith('win'):
            neuroshare = ctypes.windll.LoadLibrary(self.dllname)
        elif sys.platform.startswith('linux'):
            neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
        neuroshare = DllWithError(neuroshare)
        
        #elif sys.platform.startswith('darwin'):
        

        # API version
        info = ns_LIBRARYINFO()
        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))

        if not cascade:
            return seg


        # open file
        hFile = ctypes.c_uint32(0)
        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
        fileinfo = ns_FILEINFO()
        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
        
        # read all entities
        for dwEntityID in range(fileinfo.dwEntityCount):
            entityInfo = ns_ENTITYINFO()
            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))

            # EVENT
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                pEventInfo = ns_EVENTINFO()
                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))

                if pEventInfo.dwEventType == 0: #TEXT
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 1:#CVS
                    pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
                elif pEventInfo.dwEventType == 2:# 8bit
                    pData = ctypes.c_byte(0)
                elif pEventInfo.dwEventType == 3:# 16bit
                    pData = ctypes.c_int16(0)
                elif pEventInfo.dwEventType == 4:# 32bit
                    pData = ctypes.c_int32(0)
                pdTimeStamp  = ctypes.c_double(0.)
                pdwDataRetSize = ctypes.c_uint32(0)

                ea = Event(name = str(entityInfo.szEntityLabel),)
                if not lazy:
                    times = [ ]
                    labels = [ ]
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
                        times.append(pdTimeStamp.value)
                        labels.append(str(pData.value))
                    ea.times = times*pq.s
                    ea.labels = np.array(labels, dtype ='S')
                else :
                    ea.lazy_shape = entityInfo.dwItemCount
                seg.eventarrays.append(ea)

            # analog
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                pAnalogInfo = ns_ANALOGINFO()

                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
                dwIndexCount = entityInfo.dwItemCount

                if lazy:
                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
                else:
                    pdwContCount = ctypes.c_uint32(0)
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    total_read = 0
                    while total_read< entityInfo.dwItemCount:
                        dwStartIndex = ctypes.c_uint32(total_read)
                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
                        
                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                        total_read += pdwContCount.value
                            
                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)

                #t_start
                dwIndex = 0
                pdTime = ctypes.c_double(0)
                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))

                anaSig = AnalogSignal(signal,
                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
                                                    t_start = pdTime.value * pq.s,
                                                    name = str(entityInfo.szEntityLabel),
                                                    )
                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
                if lazy:
                    anaSig.lazy_shape = entityInfo.dwItemCount
                seg.analogsignals.append( anaSig )


            #segment
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:

                pdwSegmentInfo = ns_SEGMENTINFO()
                if not str(entityInfo.szEntityLabel).startswith('spks'):
                    continue

                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
                nsource = pdwSegmentInfo.dwSourceCount

                pszMsgBuffer  = ctypes.create_string_buffer(" "*256)
                neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
                
                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
                    pSourceInfo = ns_SEGSOURCEINFO()
                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )

                if lazy:
                    sptr = SpikeTrain(times, name = str(entityInfo.szEntityLabel), t_stop = 0.*pq.s)
                    sptr.lazy_shape = entityInfo.dwItemCount
                else:
                    pdTimeStamp  = ctypes.c_double(0.)
                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
                    pdwSampleCount = ctypes.c_uint32(0)
                    pdwUnitID= ctypes.c_uint32(0)

                    nsample  = int(dwDataBufferSize)
                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
                    for dwIndex in range(entityInfo.dwItemCount ):
                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
                                ctypes.byref(pdwUnitID ) )

                        times[dwIndex] = pdTimeStamp.value
                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
                    
                    sptr = SpikeTrain(times = pq.Quantity(times, units = 's', copy = False),
                                        t_stop = times.max(),
                                        waveforms = pq.Quantity(waveforms, units = str(pdwSegmentInfo.szUnits), copy = False ),
                                        left_sweep = nsample/2./float(pdwSegmentInfo.dSampleRate)*pq.s,
                                        sampling_rate = float(pdwSegmentInfo.dSampleRate)*pq.Hz,
                                        name = str(entityInfo.szEntityLabel),
                                        )
                seg.spiketrains.append(sptr)


            # neuralevent
            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':

                pNeuralInfo = ns_NEURALINFO()
                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))

                if lazy:
                    times = [ ]*pq.s
                    t_stop = 0*pq.s
                else:
                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
                    dwStartIndex = 0
                    dwIndexCount = entityInfo.dwItemCount
                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
                    times = pData*pq.s
                    t_stop = times.max()
                sptr = SpikeTrain(times, t_stop =t_stop,
                                                name = str(entityInfo.szEntityLabel),)
                if lazy:
                    sptr.lazy_shape = entityInfo.dwItemCount
                seg.spiketrains.append(sptr)

        # close
        neuroshare.ns_CloseFile(hFile)

        seg.create_many_to_one_relationship()
        return seg
Example #42
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """

        """

        fid = open(self.filename, "rb")
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadatas
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader["Year"],
            globalHeader["Month"],
            globalHeader["Day"],
            globalHeader["Hour"],
            globalHeader["Minute"],
            globalHeader["Second"],
        )
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=globalHeader["Version"])

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader["NumDSPChannels"]):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader["Template"] = np.array(channelHeader["Template"]).reshape((5, 64))
            channelHeader["Boxes"] = np.array(channelHeader["Boxes"]).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader["Channel"]] = channelHeader
            maxunit = max(channelHeader["NUnits"], maxunit)
            maxchan = max(channelHeader["Channel"], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(globalHeader["NumEventChannels"]):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader["Channel"]] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader["NumSlowChannels"]):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader["Channel"]] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders))
        t_starts = np.zeros(len(slowChannelHeaders), dtype="f")

        # spiketimes and waveform
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype="i")
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype="i")

        # eventarrays
        nb_events = {}
        # maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            # maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader["Channel"]
            unit = dataBlockHeader["Unit"]
            n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
            time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
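            # the 5-byte (40-bit) timestamp is split across two fields: the upper byte
            # is shifted by 32 bits and added to the lower 32-bit word (AD clock ticks)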

            if dataBlockHeader["Type"] == 1:
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader["Type"] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader["Type"] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] == 0:
                    t_starts[chan] = time

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit], dtype="f")
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros((nb_spikes[chan, unit], n1, n2), dtype="f4")
            pos_spikes = np.zeros(nb_spikes.shape, dtype="i")

            # allocating mem for event
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = np.zeros(nb, dtype="f")
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader["Channel"]
                n1, n2 = dataBlockHeader["NumberOfWaveforms"], dataBlockHeader["NumberOfWordsInWaveform"]
                time = dataBlockHeader["UpperByteOf5ByteTimestamp"] * 2.0 ** 32 + dataBlockHeader["TimeStamp"]
                time /= globalHeader["ADFrequency"]

                if n2 < 0:
                    break
                if dataBlockHeader["Type"] == 1:
                    # spike
                    unit = dataBlockHeader["Unit"]
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = (
                            np.fromstring(fid.read(n1 * n2 * 2), dtype="i2").reshape(n1, n2).astype("f4")
                        )
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1

                elif dataBlockHeader["Type"] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan][pos] = time
                    eventpositions[chan] += 1

                elif dataBlockHeader["Type"] == 5:
                    # signal
                    data = np.fromstring(fid.read(n2 * 2), dtype="i2").astype("f4")
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size

        ## Step 4: create neo object
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
            else:
                times = evarrays[chan]
            ea = EventArray(times * pq.s, channel_name=eventHeaders[chan]["Name"], channel_index=chan)
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
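                # the gain used to scale the raw samples depends on the .plx file
                # version, as the three branches below show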
                if globalHeader["Version"] == 100 or globalHeader["Version"] == 101:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * 1000.0)
                elif globalHeader["Version"] == 102:
                    gain = 5000.0 / (2048 * slowChannelHeaders[chan]["Gain"] * slowChannelHeaders[chan]["PreampGain"])
                elif globalHeader["Version"] >= 103:
                    gain = globalHeader["SlowMaxMagnitudeMV"] / (
                        0.5
                        * (2 ** globalHeader["BitsPerSpikeSample"])
                        * slowChannelHeaders[chan]["Gain"]
                        * slowChannelHeaders[chan]["PreampGain"]
                    )
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(slowChannelHeaders[chan]["ADFreq"]) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]["Channel"],
                channel_name=slowChannelHeaders[chan]["Name"],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader["Version"] < 103:
                        gain = 3000.0 / (2048 * dspChannelHeaders[chan]["Gain"] * 1000.0)
                    elif globalHeader["Version"] >= 103 and globalHeader["Version"] < 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * 1000.0
                        )
                    elif globalHeader["Version"] > 105:
                        gain = globalHeader["SpikeMaxMagnitudeMV"] / (
                            0.5 * 2.0 ** (globalHeader["BitsPerSpikeSample"]) * globalHeader["SpikePreAmpGain"]
                        )
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(times, units="s", t_stop=t_stop * pq.s, waveforms=waveforms)
            sptr.annotate(unit_name=dspChannelHeaders[chan]["Name"])
            sptr.annotate(channel_index=chan)
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
Example #43
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)

            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            global_t_start = None
            # Step 1 : first loop for counting - tsq file
            tsq = open(os.path.join(subdir, tankname+'_'+blockname+'.tsq'), 'rb')
            hr = HeaderReader(tsq, TsqDescription)
            allsig = { }
            allspiketr = { }
            allevent = { }
            while 1:
                h = hr.read_f()
                if h is None:
                    break

                channel, code, evtype = h['channel'], h['code'], h['evtype']

                if Types[evtype] == 'EVTYPE_UNKNOWN':
                    pass

                elif Types[evtype] == 'EVTYPE_MARK' :
                    if global_t_start is None:
                        global_t_start = h['timestamp']

                elif Types[evtype] == 'EVTYPE_SCALER' :
                    # TODO
                    pass

                elif Types[evtype] == 'EVTYPE_STRON' or \
                     Types[evtype] == 'EVTYPE_STROFF':
                    # EVENTS

                    if code not in allevent:
                        allevent[code] = { }
                    if channel not in allevent[code]:
                        ea = EventArray(name = code , channel_index = channel)
                        # for counting:
                        ea.lazy_shape = 0
                        ea.maxlabelsize = 0


                        allevent[code][channel] = ea

                    allevent[code][channel].lazy_shape += 1
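                    # the strobe value is packed into 'eventoffset': reinterpret the
                    # int64 bit pattern as a float64; its string length sizes the label array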
                    strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    strobe = str(strobe)
                    if len(strobe)>= allevent[code][channel].maxlabelsize:
                        allevent[code][channel].maxlabelsize = len(strobe)

                    #~ ev = Event()
                    #~ ev.time = h['timestamp'] - global_t_start
                    #~ ev.name = code
                    #~ # the strobe attribute is masked with eventoffset
                    #~ strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                    #~ ev.label = str(strobe)
                    #~ seg._events.append( ev )

                elif Types[evtype] == 'EVTYPE_SNIP' :

                    if code not in allspiketr:
                        allspiketr[code] = { }
                    if channel not in allspiketr[code]:
                        allspiketr[code][channel] = { }
                    if h['sortcode'] not in allspiketr[code][channel]:





                        sptr = SpikeTrain([ ], units = 's',
                                                        name = str(h['sortcode']),
                                                        #t_start = global_t_start,
                                                        t_start = 0.*pq.s,
                                                        t_stop = 0.*pq.s, # temporary
                                                        left_sweep = (h['size']-10.)/2./h['frequency'] * pq.s,
                                                        sampling_rate = h['frequency'] * pq.Hz,

                                                        )
                        #~ sptr.channel = channel
                        #sptr.annotations['channel_index'] = channel
                        sptr.annotate(channel_index = channel)

                        # for counting:
                        sptr.lazy_shape = 0
                        sptr.pos = 0
                        sptr.waveformsize = h['size']-10

                        #~ sptr.name = str(h['sortcode'])
                        #~ sptr.t_start = global_t_start
                        #~ sptr.sampling_rate = h['frequency']
                        #~ sptr.left_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.right_sweep = (h['size']-10.)/2./h['frequency']
                        #~ sptr.waveformsize = h['size']-10

                        allspiketr[code][channel][h['sortcode']] = sptr

                    allspiketr[code][channel][h['sortcode']].lazy_shape += 1

                elif Types[evtype] == 'EVTYPE_STREAM':
                    if code not in allsig:
                        allsig[code] = { }
                    if channel not in allsig[code]:
                        #~ print 'code', code, 'channel',  channel
                        anaSig = AnalogSignal([] * pq.V,
                                              name=code,
                                              sampling_rate=
                                              h['frequency'] * pq.Hz,
                                              t_start=(h['timestamp'] -
                                                       global_t_start) * pq.s,
                                              channel_index=channel)
                        anaSig.lazy_dtype = np.dtype(DataFormats[h['dataformat']])
                        anaSig.pos = 0

                        # for counting:
                        anaSig.lazy_shape = 0
                        #~ anaSig.pos = 0
                        allsig[code][channel] = anaSig
                    allsig[code][channel].lazy_shape += (h['size']*4-40)/anaSig.dtype.itemsize

            if not lazy:
                # Step 2 : allocate memory
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        v[channel] = anaSig.duplicate_with_new_array(np.zeros((anaSig.lazy_shape) , dtype = anaSig.lazy_dtype)*pq.V )
                        v[channel].pos = 0

                for code, v in iteritems(allevent):
                    for channel, ea in iteritems(v):
                        ea.times = np.empty( (ea.lazy_shape)  ) * pq.s
                        ea.labels = np.empty( (ea.lazy_shape), dtype = 'S'+str(ea.maxlabelsize) )
                        ea.pos = 0

                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            new = SpikeTrain(np.zeros( (sptr.lazy_shape), dtype = 'f8' ) *pq.s ,
                                                            name = sptr.name,
                                                            t_start = sptr.t_start,
                                                            t_stop = sptr.t_stop,
                                                            left_sweep = sptr.left_sweep,
                                                            sampling_rate = sptr.sampling_rate,
                                                            waveforms = np.ones( (sptr.lazy_shape, 1, sptr.waveformsize) , dtype = 'f') * pq.mV ,
                                                        )
                            new.annotations.update(sptr.annotations)
                            new.pos = 0
                            new.waveformsize = sptr.waveformsize
                            allsorted[sortcode] = new

                # Step 3 : search sev (individual data files) or tev (common data file)
                # sev is for version > 70
                if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                    tev = open(os.path.join(subdir, tankname+'_'+blockname+'.tev'), 'rb')
                else:
                    tev = None
                for code, v in iteritems(allsig):
                    for channel, anaSig in iteritems(v):
                        if PY3K:
                            signame = anaSig.name.decode('ascii')
                        else:
                            signame = anaSig.name
                        filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(anaSig.channel_index)+'.sev')
                        if os.path.exists(filename):
                            anaSig.fid = open(filename, 'rb')
                        else:
                            anaSig.fid = tev
                for code, v in iteritems(allspiketr):
                    for channel, allsorted in iteritems(v):
                        for sortcode, sptr in iteritems(allsorted):
                            sptr.fid = tev

                # Step 4 : second loop for copying chunks of data
                tsq.seek(0)
                while 1:
                    h = hr.read_f()
                    if h is None:
                        break
                    channel, code, evtype = h['channel'], h['code'], h['evtype']

                    if Types[evtype] == 'EVTYPE_STREAM':
                        a = allsig[code][channel]
                        dt = a.dtype
                        s = int((h['size']*4-40)/dt.itemsize)
                        a.fid.seek(h['eventoffset'])
                        a[ a.pos:a.pos+s ]  = np.fromstring( a.fid.read( s*dt.itemsize ), dtype = a.dtype)
                        a.pos += s

                    elif Types[evtype] == 'EVTYPE_STRON' or \
                        Types[evtype] == 'EVTYPE_STROFF':
                        ea = allevent[code][channel]
                        ea.times[ea.pos] = (h['timestamp'] - global_t_start) * pq.s
                        strobe, = struct.unpack('d' , struct.pack('q' , h['eventoffset']))
                        ea.labels[ea.pos] = str(strobe)
                        ea.pos += 1

                    elif Types[evtype] == 'EVTYPE_SNIP':
                        sptr = allspiketr[code][channel][h['sortcode']]
                        sptr.t_stop =  (h['timestamp'] - global_t_start) * pq.s
                        sptr[sptr.pos] = (h['timestamp'] - global_t_start) * pq.s
                        sptr.waveforms[sptr.pos, 0, :] = np.fromstring( sptr.fid.read( sptr.waveformsize*4 ), dtype = 'f4') * pq.V
                        sptr.pos += 1


            # Step 5 : populating segment
            for code, v in iteritems(allsig):
                for channel, anaSig in iteritems(v):
                    seg.analogsignals.append( anaSig )

            for code, v in iteritems(allevent):
                for channel, ea in iteritems(v):
                    seg.eventarrays.append( ea )


            for code, v in iteritems(allspiketr):
                for channel, allsorted in iteritems(v):
                    for sortcode, sptr in iteritems(allsorted):
                        seg.spiketrains.append( sptr )

        bl.create_many_to_one_relationship()
        return bl
Example #44
    def read_segment(self, block_index=0, seg_index=0, lazy=False,
                     signal_group_mode=None, load_waveforms=False, time_slice=None):
        """
        :param block_index: int default 0. In case of several block block_index can be specified.

        :param seg_index: int default 0. Index of segment.

        :param lazy: False by default.

        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO):
        This controls the behavior for grouping channels into AnalogSignal objects.
            * 'split-all': each channel gives its own AnalogSignal
            * 'group-by-same-units': all channels sharing the same quantity units are grouped into
              a 2D AnalogSignal

        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms is None or not.

        :param time_slice: None by default, meaning no limit.
            A time slice is (t_start, t_stop), where both are quantities.
            All objects (AnalogSignal, SpikeTrain, Event, Epoch) are loaded only within the slice.
        """

        if lazy:
            warnings.warn(
                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
                DeprecationWarning)

        if signal_group_mode is None:
            signal_group_mode = self._prefered_signal_group_mode

        # annotations
        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
        for k in ('signals', 'units', 'events'):
            seg_annotations.pop(k)
        seg_annotations = check_annotations(seg_annotations)

        seg = Segment(index=seg_index, **seg_annotations)

        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s

        # get only a slice of objects limited by t_start and t_stop time_slice = (t_start, t_stop)
        if time_slice is None:
            t_start, t_stop = None, None
            t_start_, t_stop_ = None, None
        else:
            assert not lazy, 'time slice only works when not lazy'
            t_start, t_stop = time_slice

            t_start = ensure_second(t_start)
            t_stop = ensure_second(t_stop)

            # checks limits
            if t_start < seg_t_start:
                t_start = seg_t_start
            if t_stop > seg_t_stop:
                t_stop = seg_t_stop

            # in float format in second (for rawio clip)
            t_start_, t_stop_ = float(t_start.magnitude), float(t_stop.magnitude)

            # new spiketrain limits
            seg_t_start = t_start
            seg_t_stop = t_stop

        # AnalogSignal
        signal_channels = self.header['signal_channels']

        if signal_channels.size > 0:
            channel_indexes_list = self.get_group_channel_indexes()
            for channel_indexes in channel_indexes_list:
                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
                sig_t_start = self.get_signal_t_start(
                    block_index, seg_index, channel_indexes) * pq.s

                sig_size = self.get_signal_size(block_index=block_index, seg_index=seg_index,
                                                channel_indexes=channel_indexes)
                if not lazy:
                    # in case of time_slice get: get i_start, i_stop, new sig_t_start
                    if t_stop is not None:
                        i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)
                        if i_stop > sig_size:
                            i_stop = sig_size
                    else:
                        i_stop = None
                    if t_start is not None:
                        i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)
                        if i_start < 0:
                            i_start = 0
                        sig_t_start += (i_start / sr).rescale('s')
                    else:
                        i_start = None

                    raw_signal = self.get_analogsignal_chunk(block_index=block_index,
                                                             seg_index=seg_index, i_start=i_start,
                                                             i_stop=i_stop,
                                                             channel_indexes=channel_indexes)
                    float_signal = self.rescale_signal_raw_to_float(
                        raw_signal,
                        dtype='float32',
                        channel_indexes=channel_indexes)

                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
                        channel_indexes,
                        signal_group_mode=signal_group_mode).items():
                    units = np.unique(signal_channels[ind_abs]['units'])
                    assert len(units) == 1
                    units = ensure_signal_units(units[0])

                    if signal_group_mode == 'split-all':
                        # in that case per-channel annotations are OK
                        chan_index = ind_abs[0]
                        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
                            'signals'][chan_index]
                        annotations = dict(d)
                        if 'name' not in annotations:
                            annotations['name'] = signal_channels['name'][chan_index]
                    else:
                        # when channels are grouped by the same units,
                        # annotations have channel_names and channel_ids arrays;
                        # this will be moved to array annotations soon
                        annotations = {}
                        annotations['name'] = 'Channel bundle ({}) '.format(
                            ','.join(signal_channels[ind_abs]['name']))
                        annotations['channel_names'] = signal_channels[ind_abs]['name']
                        annotations['channel_ids'] = signal_channels[ind_abs]['id']
                    annotations = check_annotations(annotations)
                    if lazy:
                        anasig = AnalogSignal(np.array([]), units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                        anasig.lazy_shape = (sig_size, len(ind_within))
                    else:
                        anasig = AnalogSignal(float_signal[:, ind_within], units=units, copy=False,
                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
                    seg.analogsignals.append(anasig)

        # SpikeTrain and waveforms (optional)
        unit_channels = self.header['unit_channels']
        for unit_index in range(len(unit_channels)):
            if not lazy and load_waveforms:
                raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
                                                             seg_index=seg_index,
                                                             unit_index=unit_index,
                                                             t_start=t_start_, t_stop=t_stop_)
                float_waveforms = self.rescale_waveforms_to_float(raw_waveforms, dtype='float32',
                                                                  unit_index=unit_index)
                wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
                waveforms = pq.Quantity(float_waveforms, units=wf_units,
                                        dtype='float32', copy=False)
                wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
                if wf_left_sweep > 0:
                    wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
                else:
                    wf_left_sweep = None
                wf_sampling_rate = wf_sampling_rate * pq.Hz
            else:
                waveforms = None
                wf_left_sweep = None
                wf_sampling_rate = None

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
                unit_index]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = unit_channels['name'][unit_index]
            annotations = check_annotations(annotations)

            if not lazy:
                spike_timestamp = self.get_spike_timestamps(block_index=block_index,
                                                            seg_index=seg_index,
                                                            unit_index=unit_index,
                                                            t_start=t_start_, t_stop=t_stop_)
                spike_times = self.rescale_spike_timestamp(spike_timestamp, 'float64')
                sptr = SpikeTrain(spike_times, units='s', copy=False,
                                  t_start=seg_t_start, t_stop=seg_t_stop,
                                  waveforms=waveforms, left_sweep=wf_left_sweep,
                                  sampling_rate=wf_sampling_rate, **annotations)
            else:
                nb = self.spike_count(block_index=block_index, seg_index=seg_index,
                                      unit_index=unit_index)
                sptr = SpikeTrain(np.array([]), units='s', copy=False, t_start=seg_t_start,
                                  t_stop=seg_t_stop, **annotations)
                sptr.lazy_shape = (nb,)

            seg.spiketrains.append(sptr)

        # Events/Epoch
        event_channels = self.header['event_channels']
        for chan_ind in range(len(event_channels)):
            if not lazy:
                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
                    block_index=block_index,
                    seg_index=seg_index, event_channel_index=chan_ind,
                    t_start=t_start_, t_stop=t_stop_)
                ev_times = self.rescale_event_timestamp(ev_timestamp, 'float64') * pq.s
                if ev_raw_durations is None:
                    ev_durations = None
                else:
                    ev_durations = self.rescale_epoch_duration(ev_raw_durations, 'float64') * pq.s
                ev_labels = ev_labels.astype('S')
            else:
                nb = self.event_count(block_index=block_index, seg_index=seg_index,
                                      event_channel_index=chan_ind)
                lazy_shape = (nb,)
                ev_times = np.array([]) * pq.s
                ev_labels = np.array([], dtype='S')
                ev_durations = np.array([]) * pq.s

            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][
                chan_ind]
            annotations = dict(d)
            if 'name' not in annotations:
                annotations['name'] = event_channels['name'][chan_ind]

            annotations = check_annotations(annotations)

            if event_channels['type'][chan_ind] == b'event':
                e = Event(times=ev_times, labels=ev_labels, units='s', copy=False, **annotations)
                e.segment = seg
                seg.events.append(e)
            elif event_channels['type'][chan_ind] == b'epoch':
                e = Epoch(times=ev_times, durations=ev_durations, labels=ev_labels,
                          units='s', copy=False, **annotations)
                e.segment = seg
                seg.epochs.append(e)

            if lazy:
                e.lazy_shape = lazy_shape

        seg.create_many_to_one_relationship()
        return seg
Example #45
    def read_block(self, lazy=False, cascade=True):
        bl = Block()
        tankname = os.path.basename(self.dirname)
        bl.file_origin = tankname
        if not cascade : return bl
        for blockname in os.listdir(self.dirname):
            if blockname == 'TempBlk': continue
            subdir = os.path.join(self.dirname,blockname)
            if not os.path.isdir(subdir): continue

            seg = Segment(name = blockname)
            bl.segments.append( seg)


            #TSQ is the global index
            tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
            dt = [('size','int32'),
                        ('evtype','int32'),
                        ('code','S4'),
                        ('channel','uint16'),
                        ('sortcode','uint16'),
                        ('timestamp','float64'),
                        ('eventoffset','int64'),
                        ('dataformat','int32'),
                        ('frequency','float32'),
                    ]
            tsq = np.fromfile(tsq_filename, dtype = dt)
            
            # 0x8801: 'EVTYPE_MARK' gives the global t_start
            global_t_start = tsq[tsq['evtype']==0x8801]['timestamp'][0]
           
            # TEV is the old common data file (used when per-channel .sev files are absent)
            if os.path.exists(os.path.join(subdir, tankname+'_'+blockname+'.tev')):
                tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
                #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                tev_array = np.fromfile(tev_filename, dtype = 'uint8')
                
            else:
                tev_filename = None
                tev_array = None


            for type_code, type_label in tdt_event_type:
                mask1 = tsq['evtype']==type_code
                codes = np.unique(tsq[mask1]['code'])
                
                for code in codes:
                    mask2 = mask1 & (tsq['code']==code)
                    channels = np.unique(tsq[mask2]['channel'])
                    
                    for channel in channels:
                        mask3 = mask2 & (tsq['channel']==channel)
                        
                        if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                            if lazy:
                                times = [ ]*pq.s
                                labels = np.array([ ], dtype = str)
                            else:
                                times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                                labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                            ea = EventArray(times = times, name = code , channel_index = int(channel), labels = labels)
                            if lazy:
                                ea.lazy_shape = np.sum(mask3)
                            seg.eventarrays.append(ea)
                        
                        elif type_label == 'EVTYPE_SNIP':
                            sortcodes = np.unique(tsq[mask3]['sortcode'])
                            for sortcode in sortcodes:
                                mask4 = mask3 & (tsq['sortcode']==sortcode)
                                nb_spike = np.sum(mask4)
                                sr = tsq[mask4]['frequency'][0]
                                waveformsize = tsq[mask4]['size'][0]-10
                                if lazy:
                                    times = [ ]*pq.s
                                    waveforms = None
                                else:
                                    times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                    dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])                                    
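                                    # gather each spike's raw byte chunk from the data
                                    # array and reinterpret it with the per-event dtype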
                                    waveforms = get_chunks(tsq[mask4]['size'],tsq[mask4]['eventoffset'], tev_array).view(dt)
                                    waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                    waveforms = waveforms * pq.mV
                                if nb_spike>0:
                                    # t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work but does not
                                    t_start = 0 *pq.s
                                    t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s
                                    
                                else:
                                    t_start = 0 *pq.s
                                    t_stop = 0 *pq.s
                                st = SpikeTrain(times = times, 
                                                                name = 'Chan{} Code{}'.format(channel,sortcode),
                                                                t_start = t_start,
                                                                t_stop = t_stop,
                                                                waveforms = waveforms,
                                                                left_sweep = waveformsize/2./sr * pq.s,
                                                                sampling_rate = sr * pq.Hz,
                                                                )
                                st.annotate(channel_index = channel)
                                if lazy:
                                    st.lazy_shape = nb_spike
                                seg.spiketrains.append(st)
                        
                        elif type_label == 'EVTYPE_STREAM':
                            dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                            shape = np.sum(tsq[mask3]['size']-10)
                            sr = tsq[mask3]['frequency'][0]
                            if lazy:
                                signal = [ ]
                            else:
                                if PY3K:
                                    signame = code.decode('ascii')
                                else:
                                    signame = code
                                sev_filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(channel)+'.sev')
                                if os.path.exists(sev_filename):
                                    #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                    sig_array = np.fromfile(sev_filename, dtype = 'uint8')
                                else:
                                    sig_array = tev_array
                                signal = get_chunks(tsq[mask3]['size'],tsq[mask3]['eventoffset'],  sig_array).view(dt)
                            
                            anasig = AnalogSignal(signal = signal* pq.V,
                                                                    name = '{} {}'.format(code, channel),
                                                                    sampling_rate= sr * pq.Hz,
                                                                    t_start = (tsq[mask3]['timestamp'][0] - global_t_start) * pq.s,
                                                                    channel_index = int(channel))
                            if lazy:
                                anasig.lazy_shape = shape
                            seg.analogsignals.append(anasig)
        bl.create_many_to_one_relationship()
        return bl
Example #46
    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : whether to load spike waveforms (default True)

        """

        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(
            globalHeader.pop('Year'),
            globalHeader.pop('Month'),
            globalHeader.pop('Day'),
            globalHeader.pop('Hour'),
            globalHeader.pop('Minute'),
            globalHeader.pop('Second')
        )
        seg.file_origin = os.path.basename(self.filename)

        for key, val in iteritems(globalHeader):
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(globalHeader['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
            channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
            channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = { }
        for _ in range(globalHeader['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(globalHeader['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = slowChannelHeader

        ## Step 2 : a first loop for counting size
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders))
        sample_positions = np.zeros(len(slowChannelHeaders))
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        #spiketimes and waveform
        nb_spikes = np.zeros((maxchan+1, maxunit+1) ,dtype='i')
        wf_sizes = np.zeros((maxchan+1, maxunit+1, 2) ,dtype='i')

        # eventarrays
        nb_events = { }
        #maxstrsizeperchannel = { }
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0
            #maxstrsizeperchannel[chan] = 0

        start = fid.tell()
        while fid.tell() !=-1 :
            # read block header
            dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
            if dataBlockHeader is None : break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 +
                    dataBlockHeader['TimeStamp'])

            if dataBlockHeader['Type'] == 1:
                nb_spikes[chan,unit] +=1
                wf_sizes[chan,unit,:] = [n1,n2]
                fid.seek(n1*n2*2,1)
            elif dataBlockHeader['Type'] ==4:
                #event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                #continuous signal
                fid.seek(n2*2, 1)
                if n2> 0:
                    nb_samples[chan] += n2
                if nb_samples[chan] ==0:
                    t_starts[chan] = time
                    

        ## Step 3: allocating memory and 2 loop for reading if not lazy
        if not lazy:
            # allocating mem for signal
            sigarrays = { }
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])
                
            # allocating mem for SpikeTrain
            stimearrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            swfarrays = np.zeros((maxchan+1, maxunit+1) ,dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan,unit] = np.zeros(nb_spikes[chan,unit], dtype = 'f')
                if load_spike_waveform:
                    n1,n2 = wf_sizes[chan, unit,:]
                    swfarrays[chan, unit] = np.zeros( (nb_spikes[chan, unit], n1, n2 ) , dtype = 'f4' )
            pos_spikes = np.zeros(nb_spikes.shape, dtype = 'i')
                    
            # allocating mem for event
            eventpositions = { }
            evarrays = { }
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan]=0 
                
            fid.seek(start)
            while fid.tell() !=-1 :
                dataBlockHeader = HeaderReader(fid , DataBlockHeader ).read_f(offset = None)
                if dataBlockHeader is None : break
                chan = dataBlockHeader['Channel']
                n1,n2 = dataBlockHeader['NumberOfWaveforms'] , dataBlockHeader['NumberOfWordsInWaveform']
                time = dataBlockHeader['UpperByteOf5ByteTimestamp']*2.**32 + dataBlockHeader['TimeStamp']
                time/= globalHeader['ADFrequency']

                if n2 <0: break
                if dataBlockHeader['Type'] == 1:
                    #spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan,unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1*n2 != 0 :
                        swfarrays[chan,unit][pos,:,:] = np.fromstring( fid.read(n1*n2*2) , dtype = 'i2').reshape(n1,n2).astype('f4')
                    else:
                        fid.seek(n1*n2*2,1)
                    pos_spikes[chan,unit] +=1
                
                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan]+= 1

                elif dataBlockHeader['Type'] == 5:
                    #signal
                    data = np.fromstring( fid.read(n2*2) , dtype = 'i2').astype('f4')
                    sigarrays[chan][sample_positions[chan] : sample_positions[chan]+data.size] = data
                    sample_positions[chan] += data.size


        ## Step 4: create neo object
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = EventArray(
                times*pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.eventarrays.append(ea)

            
        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = [ ]
            else:
                if globalHeader['Version'] ==100 or globalHeader['Version'] ==101 :
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*1000.)
                elif globalHeader['Version'] ==102 :
                    gain = 5000./(2048*slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                elif globalHeader['Version'] >= 103:
                    gain = globalHeader['SlowMaxMagnitudeMV']/(.5*(2**globalHeader['BitsPerSpikeSample'])*\
                                                        slowChannelHeaders[chan]['Gain']*slowChannelHeaders[chan]['PreampGain'])
                signal = sigarrays[chan]*gain
            anasig =  AnalogSignal(signal*pq.V,
                sampling_rate = float(slowChannelHeaders[chan]['ADFreq'])*pq.Hz,
                t_start = t_starts[chan]*pq.s,
                channel_index = slowChannelHeaders[chan]['Channel'],
                channel_name = slowChannelHeaders[chan]['Name'],
            )
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)
            
        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0: continue
            if lazy:
                times = [ ]
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan,unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if globalHeader['Version'] <103:
                        gain = 3000./(2048*dspChannelHeaders[chan]['Gain']*1000.)
                    elif globalHeader['Version'] >=103 and globalHeader['Version'] <105:
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*1000.)
                    elif globalHeader['Version'] >105:
                        gain = globalHeader['SpikeMaxMagnitudeMV']/(.5*2.**(globalHeader['BitsPerSpikeSample'])*globalHeader['SpikePreAmpGain'])                    
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s', 
                t_stop=t_stop*pq.s,
                waveforms=waveforms
            )
            sptr.annotate(unit_name = dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index = chan)
            for key, val in iteritems(dspChannelHeaders[chan]):
                sptr.annotate(**{key: val})

            if lazy:
                sptr.lazy_shape = nb_spikes[chan,unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg
Example #47
    def test__issue_285(self):
        ##Spiketrain
        train = SpikeTrain([3, 4, 5] * pq.s, t_stop=10.0)
        unit = Unit()
        train.unit = unit
        unit.spiketrains.append(train)

        epoch = Epoch([0, 10, 20], [2, 2, 2], ["a", "b", "c"], units="ms")

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        seg.epochs.append(epoch)
        epoch.segment = seg
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].unit, Unit)
        self.assertIsInstance(r_seg.epochs[0], Epoch)
        os.remove('blk.pkl')
        ##Epoch
        train = Epoch(times=np.arange(0, 30, 10) * pq.s,
                      durations=[10, 5, 7] * pq.ms,
                      labels=np.array(['btn0', 'btn1', 'btn2'], dtype='S'))
        train.segment = Segment()
        unit = Unit()
        unit.spiketrains.append(train)
        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
        os.remove('blk.pkl')
        ##Event
        train = Event(np.arange(0, 30, 10) * pq.s,
                      labels=np.array(['trig0', 'trig1', 'trig2'], dtype='S'))
        train.segment = Segment()
        unit = Unit()
        unit.spiketrains.append(train)

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        blk.segments.append(seg)

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
        os.remove('blk.pkl')
        ##IrregularlySampledSignal
        train = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
                                         units='mV', time_units='ms')
        train.segment = Segment()
        unit = Unit()
        train.channel_index = ChannelIndex(1)
        unit.spiketrains.append(train)

        blk = Block()
        seg = Segment()
        seg.spiketrains.append(train)
        blk.segments.append(seg)
        blk.segments[0].block = blk

        reader = PickleIO(filename="blk.pkl")
        reader.write(blk)

        reader = PickleIO(filename="blk.pkl")
        r_blk = reader.read_block()
        r_seg = r_blk.segments[0]
        self.assertIsInstance(r_seg.spiketrains[0].segment, Segment)
        self.assertIsInstance(r_seg.spiketrains[0].channel_index, ChannelIndex)
        os.remove('blk.pkl')
Example #48
    def read_block(self, lazy=False):
        """Returns a Block containing spike information.

        There is no obvious way to infer the segment boundaries from
        raw spike times, so for now all spike times are returned in one
        big segment. The way around this would be to specify the segment
        boundaries, and then change this code to put the spikes in the right
        segments.
        """
        assert not lazy, 'Do not support lazy'

        # Create block and segment to hold all the data
        block = Block()
        # Search data directory for KlustaKwik files.
        # If nothing found, return empty block
        self._fetfiles = self._fp.read_filenames('fet')
        self._clufiles = self._fp.read_filenames('clu')
        if len(self._fetfiles) == 0:
            return block

        # Create a single segment to hold all of the data
        seg = Segment(name='seg0', index=0, file_origin=self.filename)
        block.segments.append(seg)

        # Load spike times from each group and store in a dict, keyed
        # by group number
        self.spiketrains = dict()
        for group in sorted(self._fetfiles.keys()):
            # Load spike times
            fetfile = self._fetfiles[group]
            spks, features = self._load_spike_times(fetfile)
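            # spike times in the .fet file are divided by the sampling rate below,
            # i.e. they are assumed to be integer sample indices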

            # Load cluster ids or generate
            if group in self._clufiles:
                clufile = self._clufiles[group]
                uids = self._load_unit_id(clufile)
            else:
                # unclustered data, assume all zeros
                uids = np.zeros(spks.shape, dtype=np.int32)

            # error check
            if len(spks) != len(uids):
                raise ValueError("lengths of fet and clu files are different")

            # Create Unit for each cluster
            unique_unit_ids = np.unique(uids)
            for unit_id in sorted(unique_unit_ids):
                # Initialize the unit
                u = Unit(name=('unit %d from group %d' % (unit_id, group)),
                         index=unit_id, group=group)

                # Initialize a new SpikeTrain for the spikes from this unit
                st = SpikeTrain(
                    times=spks[uids == unit_id] / self.sampling_rate,
                    units='sec', t_start=0.0,
                    t_stop=spks.max() / self.sampling_rate,
                    name=('unit %d from group %d' % (unit_id, group)))
                st.annotations['cluster'] = unit_id
                st.annotations['group'] = group

                # put features in
                if len(features) != 0:
                    st.annotations['waveform_features'] = features

                # Link
                u.spiketrains.append(st)
                seg.spiketrains.append(st)

        block.create_many_to_one_relationship()
        return block
    def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                        lazy=True):
        # return SpikeTrain or Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        # # Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems' , totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit = 0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.fromstring(
                    fid.read(blockHeader.items * dt.itemsize), dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to neo standard classes: Event or SpikeTrain
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # a Spike2 marker is closer to neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time *
                                       header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                      waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
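
# A small standalone sketch (values invented) of the waveform handling used
# above: flat interleaved samples are reshaped to neo's
# (n_spikes, n_channels, n_samples) layout before being attached to a
# SpikeTrain, mirroring the "interleave > 1" branch.
import numpy as np
import quantities as pq
from neo.core import SpikeTrain

n_spikes, n_channels, n_samples = 3, 2, 4
alltimes = np.array([0.1, 0.5, 0.9]) * pq.s
flat = np.arange(n_spikes * n_channels * n_samples, dtype='f4')

# reshape, then swap the channel and sample axes
waveforms = flat.reshape((n_spikes, -1, n_channels)).swapaxes(1, 2) * pq.mV

sptr = SpikeTrain(alltimes, t_stop=1.0 * pq.s,
                  waveforms=waveforms,
                  sampling_rate=20000.0 * pq.Hz)
assert sptr.waveforms.shape == (n_spikes, n_channels, n_samples)
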
Example #50
0
    def read_nev(self, filename_nev, seg, lazy, cascade, load_waveforms = False):
        # basic header
        dt = [('header_id', 'S8'),
              ('ver_major', 'uint8'),
              ('ver_minor', 'uint8'),
              ('additionnal_flag', 'uint16'),  # flags, currently basically unused
              ('header_size', 'uint32'),  # i.e. index of first data
              ('packet_size', 'uint32'),  # number of bytes per data packet
              ('sampling_rate', 'uint32'),  # time resolution of time stamps in Hz
              ('waveform_sampling_rate', 'uint32'),  # sampling frequency of waveforms in Hz
              ('window_datetime', 'S16'),
              ('application', 'S32'),
              ('comments', 'S256'),
              ('num_ext_header', 'uint32'),  # number of extended headers
              ]
        nev_header = h = np.fromfile(filename_nev, count = 1, dtype = dt)[0]
        version = '{0}.{1}'.format(h['ver_major'], h['ver_minor'])
        assert h['header_id'].decode('ascii') == 'NEURALEV' or version == '2.1', 'Unsupported version {0}'.format(version)
        seg.annotate(blackrock_version = version)
        seg.rec_datetime = get_window_datetime(nev_header['window_datetime'])
        sr = float(h['sampling_rate'])
        wsr = float(h['waveform_sampling_rate'])
        
        if not cascade:
            return
        
        # extended header
        # this consists of N blocks with an 8-byte code + 24 data bytes
        # the data bytes depend on the code and need to be converted case by case
        raw_ext_header = np.memmap(filename_nev, offset = np.dtype(dt).itemsize,
                                                dtype = [('code', 'S8'), ('data', 'S24')],  shape = h['num_ext_header'])
        # this is for debugging
        ext_header = { }
        for code, dt_ext in ext_nev_header_codes.items():
            sel = raw_ext_header['code']==code
            ext_header[code] = raw_ext_header[sel].view(dt_ext)
        
        
        # channel label
        neuelbl_header = ext_header['NEUEVLBL']
        # Sometimes when making the channel labels we have only one channel and so must address it differently.
        try:
            channel_labels = dict(zip(neuelbl_header['channel_id'], neuelbl_header['channel_label']))
        except TypeError:
            channel_labels = dict([(neuelbl_header['channel_id'], neuelbl_header['channel_label'])])

        # TODO ext_header['DIGLABEL'] is there only one label ???? because no id in that case
        # TODO ECOMMENT + CCOMMENT for annotations
        # TODO NEUEVFLT for annotations
        
        
        # read data packet and markers
        dt0 =  [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('value', 'S{0}'.format(h['packet_size']-6)),
            ]
        data = np.memmap( filename_nev, offset = h['header_size'], dtype = dt0)
        all_ids = np.unique(data['id'])
        
        t_start = 0*pq.s
        t_stop = data['samplepos'][-1]/sr*pq.s
        
        
        
        # read event (digital 9+ analog+comment)
        def create_event_array_trig_or_analog(selection, name, labelmode=None):
            if lazy:
                times = []
                labels = np.array([], dtype='S')
            else:
                times = data_trigger['samplepos'][selection].astype(float) / sr
                if labelmode == 'digital_port':
                    labels = data_trigger['digital_port'][selection].astype('S2')
                elif labelmode is None:
                    labels = None
            ev = EventArray(times=times * pq.s,
                            labels=labels,
                            name=name)
            if lazy:
                ev.lazy_shape = np.sum(selection)
            seg.eventarrays.append(ev)

        mask = (data['id']==0) 
        dt_trig =  [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('reason', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('digital_port', 'uint16'), 
                    ('reserved1', 'S{0}'.format(h['packet_size']-10)),
                ]
        data_trigger = data.view(dt_trig)[mask]
        # Digital Triggers (Packet ID 0)
        is_digital = (data_trigger ['reason']&1)>0
        create_event_array_trig_or_analog(is_digital, 'Digital trigger', labelmode =  'digital_port' )
        
        # Analog Triggers (Packet ID 0)
        if version in ['2.1', '2.2' ]:
            for i in range(5):
                is_analog = (data_trigger ['reason']&(2**(i+1)))>0
                create_event_array_trig_or_analog(is_analog, 'Analog trigger {0}'.format(i), labelmode = None)
        
        # Comments
        mask = (data['id']==0xFFF) 
        dt_comments = [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('charset', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('color', 'uint32'), 
                    ('comment', 'S{0}'.format(h['packet_size']-12)),
                ]
        data_comments = data.view(dt_comments)[mask]
        if data_comments.size>0:
            if lazy:
                times = [ ]
                labels = [ ]
            else:
                times = data_comments['samplepos'].astype(float)/sr
                labels = data_comments['comment'].astype('S')
            ev = EventArray(times= times*pq.s,
                            labels= labels,
                            name='Comments')
            if lazy:
                ev.lazy_shape = data_comments.size
            seg.eventarrays.append(ev)
        
        
        # READ Spike channel
        channel_ids = all_ids[(all_ids>0) & (all_ids<=2048)]

        # get the dtype of waveform (this is stupidly complicated)
        if nev_header['additionnal_flag']&0x1:
            #dtype_waveforms = { k:'int16' for k in channel_ids }
            dtype_waveforms = dict( (k,'int16') for k in channel_ids)
        else:
            # there is a code, electrode by electrode, giving the appropriate dtype
            neuewav_header = ext_header['NEUEVWAV']
            dtype_waveform = dict(zip(neuewav_header['channel_id'], neuewav_header['num_bytes_per_waveform']))
            dtypes_conv = { 0: 'int8', 1 : 'int8', 2: 'int16', 4 : 'int32' }
            #dtype_waveforms = { k:dtypes_conv[v] for k,v in dtype_waveform.items() }
            dtype_waveforms = dict( (k,dtypes_conv[v]) for k,v in dtype_waveform.items() )
        
        dt2 =   [('samplepos', 'uint32'),
                    ('id', 'uint16'), 
                    ('cluster', 'uint8'), 
                    ('reserved0', 'uint8'), 
                    ('waveform','uint8',(h['packet_size']-8, )),
                ]
        data_spike = data.view(dt2)
        
        for channel_id in channel_ids:
            data_spike_chan = data_spike[data['id']==channel_id]
            cluster_ids = np.unique(data_spike_chan['cluster'])
            for cluster_id in cluster_ids:
                if cluster_id==0: 
                    name =  'unclassified'
                elif cluster_id==255:
                    name =  'noise'
                else:
                    name = 'Cluster {0}'.format(cluster_id)
                name = 'Channel {0} '.format(channel_id)+name
                
                data_spike_chan_clus = data_spike_chan[data_spike_chan['cluster']==cluster_id]
                n_spike = data_spike_chan_clus.size
                waveforms, w_sampling_rate, left_sweep = None, None, None
                if lazy:
                    times = [ ]
                else:
                    times = data_spike_chan_clus['samplepos'].astype(float)/sr
                    if load_waveforms:
                        dtype_waveform = dtype_waveforms[channel_id]
                        waveform_size = (h['packet_size'] - 8) // np.dtype(dtype_waveform).itemsize
                        waveforms = data_spike_chan_clus['waveform'].flatten().view(dtype_waveform)
                        waveforms = waveforms.reshape(n_spike, 1, waveform_size)
                        waveforms = waveforms * pq.uV
                        w_sampling_rate = wsr * pq.Hz
                        left_sweep = waveform_size // 2 / sr * pq.s
                st = SpikeTrain(times=times * pq.s, name=name,
                                t_start=t_start, t_stop=t_stop,
                                waveforms=waveforms,
                                sampling_rate=w_sampling_rate,
                                left_sweep=left_sweep)
                st.annotate(channel_index=int(channel_id))
                if lazy:
                    st.lazy_shape = n_spike
                seg.spiketrains.append(st)
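
# A self-contained sketch of the structured-dtype parsing used for the NEV
# basic header above: a packed binary record is written to a temporary file
# and read back with np.fromfile. The two-field layout is invented purely for
# illustration and is much smaller than the real header.
import tempfile
import numpy as np

hdr_dtype = np.dtype([('header_id', 'S8'), ('sampling_rate', 'uint32')])
record = np.array([(b'NEURALEV', 30000)], dtype=hdr_dtype)

with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as f:
    record.tofile(f)
    fname = f.name

h = np.fromfile(fname, count=1, dtype=hdr_dtype)[0]
assert h['header_id'] == b'NEURALEV'
assert int(h['sampling_rate']) == 30000
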
Example #51
0
    def read_segment(self, blockname=None, lazy=False, cascade=True, sortname=''):
        """
        Read a single segment from the tank. Note that TDT blocks are Neo
        segments, and TDT tanks are Neo blocks, so here the 'blockname' argument
        refers to the TDT block's name, which will be the Neo segment name.
        sortname is used to specify the external sortcode generated by offline spike sorting, if sortname=='PLX',
        there should be a ./sort/PLX/*.SortResult file in the tdt block, which stores the sortcode for every spike,
        default to '', which uses the original online sort
        """
        if not blockname:
            blockname = os.listdir(self.dirname)[0]

        if blockname == 'TempBlk': return None

        if not self.is_tdtblock(blockname): return None    # if not a tdt block

        subdir = os.path.join(self.dirname, blockname)
        if not os.path.isdir(subdir): return None

        seg = Segment(name=blockname)

        tankname = os.path.basename(self.dirname)

        #TSQ is the global index
        tsq_filename = os.path.join(subdir, tankname+'_'+blockname+'.tsq')
        dt = [('size','int32'),
                    ('evtype','int32'),
                    ('code','S4'),
                    ('channel','uint16'),
                    ('sortcode','uint16'),
                    ('timestamp','float64'),
                    ('eventoffset','int64'),
                    ('dataformat','int32'),
                    ('frequency','float32'),
                ]
        tsq = np.fromfile(tsq_filename, dtype=dt)

        # 0x8801: 'EVTYPE_MARK' gives the global t_start
        global_t_start = tsq[tsq['evtype']==0x8801]['timestamp'][0]

        #TEV is the old data file
        try:
            tev_filename = os.path.join(subdir, tankname+'_'+blockname+'.tev')
            #tev_array = np.memmap(tev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
            tev_array = np.fromfile(tev_filename, dtype='uint8')
        except IOError:
            tev_filename = None


        # if an external sortcode exists in ./sort/[sortname]/*.SortResult
        # (generated by offline sorting)
        sortresult_filename = None
        if sortname != '':
            try:
                for file in os.listdir(os.path.join(subdir, 'sort', sortname)):
                    if file.endswith(".SortResult"):
                        sortresult_filename = os.path.join(subdir, 'sort', sortname, file)

                        # get the new sortcode; the first 1024 bytes are the file header
                        newsortcode = np.fromfile(sortresult_filename, 'int8')[1024:]
                        # update the sortcode with the info from this file
                        tsq['sortcode'][1:-1] = newsortcode
                        # print('sortcode updated')
                        break
            except OSError:
                sortresult_filename = None
            except IOError:
                sortresult_filename = None


        for type_code, type_label in tdt_event_type:
            mask1 = tsq['evtype']==type_code
            codes = np.unique(tsq[mask1]['code'])

            for code in codes:
                mask2 = mask1 & (tsq['code']==code)
                channels = np.unique(tsq[mask2]['channel'])

                for channel in channels:
                    mask3 = mask2 & (tsq['channel']==channel)

                    if type_label in ['EVTYPE_STRON', 'EVTYPE_STROFF']:
                        if lazy:
                            times = [ ]*pq.s
                            labels = np.array([ ], dtype=str)
                        else:
                            times = (tsq[mask3]['timestamp'] - global_t_start) * pq.s
                            labels = tsq[mask3]['eventoffset'].view('float64').astype('S')
                        ea = Event(times=times,
                                   name=code,
                                   channel_index=int(channel),
                                   labels=labels)
                        if lazy:
                            ea.lazy_shape = np.sum(mask3)
                        seg.events.append(ea)

                    elif type_label == 'EVTYPE_SNIP':
                        sortcodes = np.unique(tsq[mask3]['sortcode'])
                        for sortcode in sortcodes:
                            mask4 = mask3 & (tsq['sortcode']==sortcode)
                            nb_spike = np.sum(mask4)
                            sr = tsq[mask4]['frequency'][0]
                            waveformsize = tsq[mask4]['size'][0]-10
                            if lazy:
                                times = [ ]*pq.s
                                waveforms = None
                            else:
                                times = (tsq[mask4]['timestamp'] - global_t_start) * pq.s
                                dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                                waveforms = get_chunks(tsq[mask4]['size'],tsq[mask4]['eventoffset'], tev_array).view(dt)
                                waveforms = waveforms.reshape(nb_spike, -1, waveformsize)
                                waveforms = waveforms * pq.mV
                            if nb_spike > 0:
                                # t_start = (tsq['timestamp'][0] - global_t_start) * pq.s  # this should work but does not
                                t_start = 0 *pq.s
                                t_stop = (tsq['timestamp'][-1] - global_t_start) * pq.s

                            else:
                                t_start = 0 *pq.s
                                t_stop = 0 *pq.s
                            st = SpikeTrain(times           = times,
                                            name            = 'Chan{0} Code{1}'.format(channel,sortcode),
                                            t_start         = t_start,
                                            t_stop          = t_stop,
                                            waveforms       = waveforms,
                                            left_sweep      = waveformsize/2./sr * pq.s,
                                            sampling_rate   = sr * pq.Hz,
                                            )
                            st.annotate(channel_index=channel)
                            if lazy:
                                st.lazy_shape = nb_spike
                            seg.spiketrains.append(st)

                    elif type_label == 'EVTYPE_STREAM':
                        dt = np.dtype(data_formats[ tsq[mask3]['dataformat'][0]])
                        shape = np.sum(tsq[mask3]['size']-10)
                        sr = tsq[mask3]['frequency'][0]
                        if lazy:
                            signal = [ ]
                        else:
                            if PY3K:
                                signame = code.decode('ascii')
                            else:
                                signame = code
                            sev_filename = os.path.join(subdir, tankname+'_'+blockname+'_'+signame+'_ch'+str(channel)+'.sev')
                            try:
                                #sig_array = np.memmap(sev_filename, mode = 'r', dtype = 'uint8') # if memory problem use this instead
                                sig_array = np.fromfile(sev_filename, dtype='uint8')
                            except IOError:
                                sig_array = tev_array
                            signal = get_chunks(tsq[mask3]['size'],tsq[mask3]['eventoffset'],  sig_array).view(dt)

                        anasig = AnalogSignal(signal        = signal* pq.V,
                                              name          = '{0} {1}'.format(code, channel),
                                              sampling_rate = sr * pq.Hz,
                                              t_start       = (tsq[mask3]['timestamp'][0] - global_t_start) * pq.s,
                                              channel_index = int(channel)
                                              )
                        if lazy:
                            anasig.lazy_shape = shape
                        seg.analogsignals.append(anasig)
        return seg
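
# A tiny sketch of the chained boolean masking used above to walk the TSQ
# index: records are grouped by event type, then event code, then channel.
# The structured array and the type code below are fabricated for
# illustration only.
import numpy as np

tsq_dtype = np.dtype([('evtype', 'int32'), ('code', 'S4'), ('channel', 'uint16')])
tsq = np.array([(1, b'Snip', 1),
                (1, b'Snip', 2),
                (2, b'Wave', 1)], dtype=tsq_dtype)

mask1 = tsq['evtype'] == 1
for code in np.unique(tsq[mask1]['code']):
    mask2 = mask1 & (tsq['code'] == code)
    for channel in np.unique(tsq[mask2]['channel']):
        mask3 = mask2 & (tsq['channel'] == channel)
        print(code, channel, np.sum(mask3))
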
Example #52
0
    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        # ~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            #print 'i',i, entityHeader['type']

            if entity_header['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)

            if entity_header['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)

            if entity_header['type'] == 3:
                # spiketrain and waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:

                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s

                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    #~ t_stop = max(globalHeader['tend']/
                    #~ globalHeader['freq']*pq.s,max(spike_times)),
                    t_stop=t_stop, name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)

            if entity_header['type'] == 4:
                # popvectors
                pass

            if entity_header['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                fragment_starts = fragment_starts.astype('f8') / global_header[
                    'freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=entity_header['offset'])
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV

                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)

            if entity_header['type'] == 6:
                # markers  : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(
                        self.filename, np.dtype(
                            'S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        seg.create_many_to_one_relationship()
        return seg
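
# A brief sketch (invented calibration values) of the ADC-to-millivolt
# conversion applied to type-5 "analog" entities above: raw int16 samples are
# scaled by a gain and shifted by an offset before being wrapped in an
# AnalogSignal.
import numpy as np
import quantities as pq
from neo.core import AnalogSignal

raw = np.array([-32768, 0, 16384, 32767], dtype='i2')  # fabricated samples
ad_to_mv, mv_offset = 0.001, 0.0                        # fabricated calibration

signal = raw.astype('f') * ad_to_mv + mv_offset
ana_sig = AnalogSignal(signal * pq.mV,
                       sampling_rate=1000.0 * pq.Hz,
                       t_start=0.0 * pq.s,
                       name='calibrated channel')
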
def generate_one_simple_segment(seg_name='segment 0',
                                supported_objects=[],
                                nb_analogsignal=4,
                                t_start=0.*pq.s,
                                sampling_rate=10*pq.kHz,
                                duration=6.*pq.s,

                                nb_spiketrain=6,
                                spikerate_range=[.5*pq.Hz, 12*pq.Hz],

                                event_types={'stim': ['a', 'b',
                                                      'c', 'd'],
                                             'enter_zone': ['one',
                                                            'two'],
                                             'color': ['black',
                                                       'yellow',
                                                       'green'],
                                             },
                                event_size_range=[5, 20],

                                epoch_types={'animal state': ['Sleep',
                                                              'Freeze',
                                                              'Escape'],
                                             'light': ['dark',
                                                       'lighted']
                                             },
                                epoch_duration_range=[.5, 3.],

                                ):
    if supported_objects and Segment not in supported_objects:
        raise ValueError('Segment must be in supported_objects')
    seg = Segment(name=seg_name)
    if AnalogSignal in supported_objects:
        for a in range(nb_analogsignal):
            anasig = AnalogSignal(rand(int(sampling_rate * duration)),
                                       sampling_rate=sampling_rate, t_start=t_start,
                                       units=pq.mV, channel_index=a,
                                       name='sig %d for segment %s' % (a, seg.name))
            seg.analogsignals.append(anasig)

    if SpikeTrain in supported_objects:
        for s in range(nb_spiketrain):
            spikerate = rand()*np.diff(spikerate_range)
            spikerate += spikerate_range[0].magnitude
            #spikedata = rand(int((spikerate*duration).simplified))*duration
            #sptr = SpikeTrain(spikedata,
            #                  t_start=t_start, t_stop=t_start+duration)
            #                  #, name = 'spiketrain %d'%s)
            spikes = rand(int((spikerate*duration).simplified))
            spikes.sort()  # spikes are supposed to be an ascending sequence
            sptr = SpikeTrain(spikes*duration,
                              t_start=t_start, t_stop=t_start+duration)
            sptr.annotations['channel_index'] = s
            seg.spiketrains.append(sptr)

    if Event in supported_objects:
        for name, labels in event_types.items():
            evt_size = rand()*np.diff(event_size_range)
            evt_size += event_size_range[0]
            evt_size = int(evt_size)
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(evt_size)*len(labels)).astype('i')]
            evt = Event(times=rand(evt_size)*duration, labels=labels)
            seg.events.append(evt)

    if Epoch in supported_objects:
        for name, labels in epoch_types.items():
            t = 0
            times = []
            durations = []
            while t < duration:
                times.append(t)
                dur = rand()*np.diff(epoch_duration_range)
                dur += epoch_duration_range[0]
                durations.append(dur)
                t = t+dur
            labels = np.array(labels, dtype='S')
            labels = labels[(rand(len(times))*len(labels)).astype('i')]
            epc = Epoch(times=pq.Quantity(times, units=pq.s),
                        durations=pq.Quantity([x[0] for x in durations],
                                              units=pq.s),
                        labels=labels,
                        )
            seg.epochs.append(epc)

    # TODO : Spike, Event

    seg.create_many_to_one_relationship()
    return seg
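
# A short usage sketch for generate_one_simple_segment as defined above,
# assuming it is in scope together with rand (numpy.random.rand) and the neo
# classes it checks against.
import quantities as pq
from neo.core import Segment, AnalogSignal, SpikeTrain, Event

seg = generate_one_simple_segment(
    seg_name='demo segment',
    supported_objects=[Segment, AnalogSignal, SpikeTrain, Event],
    nb_analogsignal=2,
    nb_spiketrain=3,
    duration=2. * pq.s)

print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))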