Example #1
    def read_segment(
        self,
        cascade=True,
        lazy=False,
        sampling_rate=1. * pq.Hz,
        t_start=0. * pq.s,
        unit=pq.V,
        nbchannel=1,
        bytesoffset=0,
        dtype='f4',
        rangemin=-10,
        rangemax=10,
    ):
        """
        Read the signal from a raw binary interleaved compact file.

        Arguments:
            sampling_rate : sampling rate of the signal
            t_start : time of the first sample of each channel
            unit : unit of the AnalogSignal; can be a str or a Quantity
            nbchannel : number of channels
            bytesoffset : number of bytes to skip at the start of the file
            dtype : dtype of the data
            rangemin, rangemax : if the dtype is an integer type, the minimum
                and maximum of the range, in volts
        """
        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        dtype = np.dtype(dtype)

        if isinstance(sampling_rate, (int, float)):
            # not a Quantity: assume Hz by default
            sampling_rate = sampling_rate * pq.Hz

        if isinstance(t_start, (int, float)):
            # not a Quantity: assume seconds by default
            t_start = t_start * pq.s

        unit = pq.Quantity(1, unit)

        if lazy:
            sig = []
        else:
            sig = np.memmap(self.filename,
                            dtype=dtype,
                            mode='r',
                            offset=bytesoffset)
            if sig.size % nbchannel != 0:
                # drop trailing samples that do not fill a complete frame
                sig = sig[:sig.size - sig.size % nbchannel]
            sig = sig.reshape((sig.size // nbchannel, nbchannel))
            if dtype.kind == 'i':
                sig = sig.astype('f')
                sig /= 2**(8 * dtype.itemsize)
                sig *= (rangemax - rangemin)
                sig += (rangemax + rangemin) / 2.
            elif dtype.kind == 'u':
                sig = sig.astype('f')
                sig /= 2**(8 * dtype.itemsize)
                sig *= (rangemax - rangemin)
                sig += rangemin

        anaSig = AnalogSignal(sig,
                              units=unit,
                              sampling_rate=sampling_rate,
                              t_start=t_start,
                              copy=False)

        if lazy:
            # TODO
            anaSig.lazy_shape = None
        seg.analogsignals.append(anaSig)
        seg.create_many_to_one_relationship()
        return seg
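A minimal usage sketch (hedged: the filename and recording parameters are hypothetical, and RawBinarySignalIO is assumed to be the neo class hosting read_segment): read a 4-channel int16 file sampled at 10 kHz whose integer range maps to ±5 V.

import quantities as pq
from neo.io import RawBinarySignalIO  # assumed host class for read_segment

io = RawBinarySignalIO(filename='recording.raw')  # hypothetical file
seg = io.read_segment(sampling_rate=10000. * pq.Hz,
                      t_start=0. * pq.s,
                      unit=pq.V,
                      nbchannel=4,
                      dtype='i2',    # int16 samples, rescaled to [-5, 5] V
                      rangemin=-5,
                      rangemax=5)
print(seg.analogsignals[0].shape)    # (n_samples, 4)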
Example #2
def spike_train_timescale(binned_spiketrain, max_tau):
    r"""
    Calculates the auto-correlation time of a binned spike train; uses the
    definition of the auto-correlation time proposed in
    :cite:`correlation-Wieland2015_040901` (Eq. 6):

    .. math::
        \tau_\mathrm{corr} = \int_{-\tau_\mathrm{max}}^{\tau_\mathrm{max}}\
            \left[ \frac{\hat{C}(\tau)}{\hat{C}(0)} \right]^2 d\tau

    where :math:`\hat{C}(\tau) = C(\tau)-\nu\delta(\tau)` denotes
    the auto-correlation function excluding the Dirac delta at zero timelag.

    Parameters
    ----------
    binned_spiketrain : elephant.conversion.BinnedSpikeTrain
        A binned spike train containing the spike train to be evaluated.
    max_tau : pq.Quantity
        Maximal integration time :math:`\tau_{max}` of the auto-correlation
        function. It needs to be a multiple of the `bin_size` of
        `binned_spiketrain`.

    Returns
    -------
    timescale : pq.Quantity
        The auto-correlation time of the binned spiketrain with the same units
        as in the input. If `binned_spiketrain` contains fewer than 2 spikes,
        a warning is raised and `np.nan` is returned.

    Notes
    -----
    * :math:`\tau_\mathrm{max}` is a critical parameter: numerical estimates
      of the auto-correlation functions are inherently noisy. Due to the
      square in the definition above, this noise is integrated. Thus, it is
      necessary to introduce a cutoff for the numerical integration - this
      cutoff should be neither smaller than the true auto-correlation time
      nor much bigger.
    * The bin size of `binned_spiketrain` is another critical parameter as it
      defines the discretization of the integral :math:`d\tau`. If it is too
      big, the numerical approximation of the integral is inaccurate.

    Examples
    --------
    >>> import neo
    >>> import numpy as np
    >>> import quantities as pq
    >>> from elephant.spike_train_correlation import spike_train_timescale
    >>> from elephant.conversion import BinnedSpikeTrain
    >>> spiketrain = neo.SpikeTrain([1, 5, 7, 8], units='ms', t_stop=10*pq.ms)
    >>> bst = BinnedSpikeTrain(spiketrain, bin_size=1 * pq.ms)
    >>> spike_train_timescale(bst, max_tau=5 * pq.ms)
    array(14.11111111) * ms

    """
    if binned_spiketrain.get_num_of_spikes() < 2:
        warnings.warn("Spike train contains less than 2 spikes! "
                      "np.nan will be returned.")
        return np.nan

    bin_size = binned_spiketrain._bin_size
    try:
        max_tau = max_tau.rescale(binned_spiketrain.units).item()
    except (AttributeError, ValueError):
        raise ValueError("max_tau needs units of time")

    # safe casting of max_tau/bin_size to integer
    max_tau_bins = int(round(max_tau / bin_size))
    if not np.isclose(max_tau, max_tau_bins * bin_size):
        raise ValueError("max_tau has to be a multiple of the bin_size")

    cch_window = [-max_tau_bins, max_tau_bins]
    corrfct, bin_ids = cross_correlation_histogram(
        binned_spiketrain,
        binned_spiketrain,
        window=cch_window,
        cross_correlation_coefficient=True)
    # Take only t > 0 values, in particular neglecting the delta peak.
    start_id = corrfct.time_index((bin_size / 2) * binned_spiketrain.units)
    corrfct = corrfct.magnitude.squeeze()[start_id:]

    # Calculate the timescale using trapezoidal integration
    integr = (corrfct / corrfct[0])**2
    timescale = 2 * integrate.trapz(integr, dx=bin_size)
    return pq.Quantity(timescale, units=binned_spiketrain.units, copy=False)
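Since max_tau must be an integer multiple of the bin size (see the check above), it is safest to derive it from bin_size directly. A short sketch, reusing bst from the docstring example:

n_lags = 5                            # number of bins to integrate over
max_tau = n_lags * bst.bin_size       # 5 * 1 ms = 5 ms, always a valid multiple
timescale = spike_train_timescale(bst, max_tau=max_tau)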
Example #3
    def load(self,
             time_slice=None,
             strict_slicing=True,
             magnitude_mode='rescaled',
             load_waveforms=False):
        '''
        *Args*:
            :time_slice: None or a tuple (t_start, t_stop) of quantities.
                            None means the entire signal.
            :strict_slicing: True by default.
                 Controls whether an error is raised when a time_slice
                 member (t_start or t_stop) is outside the real time range
                 of the segment.
            :magnitude_mode: 'rescaled' or 'raw'.
            :load_waveforms: bool, whether to load waveforms as well.
        '''

        t_start, t_stop = consolidate_time_slice(time_slice, self.t_start,
                                                 self.t_stop, strict_slicing)
        _t_start, _t_stop = prepare_time_slice(time_slice)

        spike_timestamps = self._rawio.get_spike_timestamps(
            block_index=self._block_index,
            seg_index=self._seg_index,
            unit_index=self._unit_index,
            t_start=_t_start,
            t_stop=_t_stop)

        if magnitude_mode == 'raw':
            # the neo.rawio interface would need small changes to also expose
            # the clock underlying the spike timestamps, which is not always
            # the same as the signals' clock
            raise NotImplementedError
        elif magnitude_mode == 'rescaled':
            dtype = 'float64'
            spike_times = self._rawio.rescale_spike_timestamp(spike_timestamps,
                                                              dtype=dtype)
            units = 's'

        if load_waveforms:
            assert self.sampling_rate is not None, 'Do not have waveforms'

            raw_wfs = self._rawio.get_spike_raw_waveforms(
                block_index=self._block_index,
                seg_index=self._seg_index,
                unit_index=self._unit_index,
                t_start=_t_start,
                t_stop=_t_stop)
            if magnitude_mode == 'rescaled':
                float_wfs = self._rawio.rescale_waveforms_to_float(
                    raw_wfs, dtype='float32', unit_index=self._unit_index)
                waveforms = pq.Quantity(float_wfs,
                                        units=self._wf_units,
                                        dtype='float32',
                                        copy=False)
            elif magnitude_mode == 'raw':
                # a CompoundUnit could be used here, but that is overkill,
                # so we use dimensionless
                waveforms = pq.Quantity(raw_wfs,
                                        units='',
                                        dtype=raw_wfs.dtype,
                                        copy=False)
        else:
            waveforms = None

        sptr = SpikeTrain(spike_times,
                          t_stop,
                          units=units,
                          dtype=dtype,
                          t_start=t_start,
                          copy=False,
                          sampling_rate=self.sampling_rate,
                          waveforms=waveforms,
                          left_sweep=self.left_sweep,
                          name=self.name,
                          file_origin=self.file_origin,
                          description=self.description,
                          **self.annotations)

        return sptr
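A usage sketch (hedged: Spike2IO stands in for any rawio-based neo reader, and the filename is hypothetical). load() is called on the proxy objects that read_block(lazy=True) returns:

import quantities as pq
from neo.io import Spike2IO  # assumption: any rawio-based reader behaves alike

reader = Spike2IO(filename='data.smr')        # hypothetical file
block = reader.read_block(lazy=True)
proxy = block.segments[0].spiketrains[0]      # a spike-train proxy
sptr = proxy.load(time_slice=(1. * pq.s, 3. * pq.s),
                  strict_slicing=False,       # clip instead of raising
                  magnitude_mode='rescaled',
                  load_waveforms=False)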
Example #4
def test_lcc25_dwell():
    with expected_protocol(ik.thorlabs.LCC25, ["dwell?", "dwell=10"],
                           ["dwell?", "20", ">dwell=10", ">"],
                           sep="\r") as lcc:
        unit_eq(lcc.dwell, pq.Quantity(20, "ms"))
        lcc.dwell = 10
Example #5
def test_lcc25_frequency():
    with expected_protocol(ik.thorlabs.LCC25, ["freq?", "freq=10.0"],
                           ["freq?", "20", ">freq=10.0", ">"],
                           sep="\r") as lcc:
        unit_eq(lcc.frequency, pq.Quantity(20, "Hz"))
        lcc.frequency = 10.0
Example #6
    def read_one_channel_continuous(self,
                                    fid,
                                    channel_num,
                                    header,
                                    take_ideal_sampling_rate,
                                    lazy=True):
        # read AnalogSignal
        channelHeader = header.channelHeaders[channel_num]

        # data type
        if channelHeader.kind == 1:
            dt = np.dtype('i2')
        elif channelHeader.kind == 9:
            dt = np.dtype('f4')

        # sample rate
        if take_ideal_sampling_rate:
            sampling_rate = channelHeader.ideal_rate * pq.Hz
        else:
            if header.system_id in [1, 2, 3, 4, 5]:  # Before version 5
                #~ print channel_num, channelHeader.divide, \
                #~ header.us_per_time, header.time_per_adc
                sample_interval = (channelHeader.divide * header.us_per_time *
                                   header.time_per_adc) * 1e-6
            else:
                sample_interval = (channelHeader.l_chan_dvd *
                                   header.us_per_time * header.dtime_base)
            sampling_rate = (1. / sample_interval) * pq.Hz

        # read blocks header to preallocate memory by jumping block to block
        if channelHeader.blocks == 0:
            return []
        fid.seek(channelHeader.firstblock)
        blocksize = [0]
        starttimes = []
        for b in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            if len(blocksize) > len(starttimes):
                starttimes.append(blockHeader.start_time)
            blocksize[-1] += blockHeader.items

            if blockHeader.succ_block > 0:
                # ugly but CED does not guarantee continuity in AnalogSignal
                fid.seek(blockHeader.succ_block)
                nextBlockHeader = HeaderReader(fid,
                                               np.dtype(blockHeaderDesciption))
                sample_interval = (blockHeader.end_time -
                                   blockHeader.start_time) / \
                                  (blockHeader.items - 1)
                interval_with_next = nextBlockHeader.start_time - \
                    blockHeader.end_time
                if interval_with_next > sample_interval:
                    blocksize.append(0)
                fid.seek(blockHeader.succ_block)

        ana_sigs = []
        if channelHeader.unit in unit_convert:
            unit = pq.Quantity(1, unit_convert[channelHeader.unit])
        else:
            try:
                unit = pq.Quantity(1, channelHeader.unit)
            except Exception:
                # unrecognized unit string: fall back to dimensionless
                unit = pq.Quantity(1, '')

        for b, bs in enumerate(blocksize):
            if lazy:
                signal = [] * unit
            else:
                signal = pq.Quantity(np.empty(bs, dtype='f4'), units=unit)
            ana_sig = AnalogSignal(
                signal,
                sampling_rate=sampling_rate,
                t_start=(starttimes[b] * header.us_per_time *
                         header.dtime_base * pq.s),
                channel_index=channel_num)
            ana_sigs.append(ana_sig)

        if lazy:
            for s, ana_sig in enumerate(ana_sigs):
                ana_sig.lazy_shape = blocksize[s]

        else:
            # read data  by jumping block to block
            fid.seek(channelHeader.firstblock)
            pos = 0
            numblock = 0
            for b in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid,
                                           np.dtype(blockHeaderDesciption))
                # read data
                sig = np.frombuffer(fid.read(blockHeader.items * dt.itemsize),
                                    dtype=dt)
                ana_sigs[numblock][pos:pos + sig.size] = \
                    sig.reshape(-1, 1).astype('f4') * unit
                pos += sig.size
                if pos >= blocksize[numblock]:
                    numblock += 1
                    pos = 0
                # jump to next block
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

        # convert for int16
        if dt.kind == 'i':
            for ana_sig in ana_sigs:
                ana_sig *= channelHeader.scale / 6553.6
                ana_sig += channelHeader.offset * unit

        return ana_sigs
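The int16 branch at the end applies Spike2's scaling convention, physical = raw * scale / 6553.6 + offset. A quick numeric check (scale and offset are made-up header values):

import numpy as np
raw = np.array([-32768, 0, 32767], dtype='i2')     # full int16 range
scale, offset = 1.0, 0.0                           # hypothetical header values
print(raw.astype('f4') * scale / 6553.6 + offset)  # [-5.0, 0.0, ~5.0]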
Example #7
def test_lcc25_voltage1():
    with expected_protocol(ik.thorlabs.LCC25, ["volt1?", "volt1=10.0"],
                           ["volt1?", "20", ">volt1=10.0", ">"],
                           sep="\r") as lcc:
        unit_eq(lcc.voltage1, pq.Quantity(20, "V"))
        lcc.voltage1 = 10.0
Example #8
    def __new__(cls,
                times,
                t_stop,
                units=None,
                dtype=None,
                copy=True,
                sampling_rate=1.0 * pq.Hz,
                t_start=0.0 * pq.s,
                waveforms=None,
                left_sweep=None,
                name=None,
                file_origin=None,
                description=None,
                **annotations):
        '''
        Constructs a new :class:`SpikeTrain` instance from data.

        This is called whenever a new :class:`SpikeTrain` is created from the
        constructor, but not when slicing.
        '''
        # len(times) != 0 is a workaround for a bug occurring during neo import
        if (len(times) != 0 and waveforms is not None
                and len(times) != waveforms.shape[0]):
            raise ValueError(
                "the number of waveforms should be equal to the number of "
                "spikes")

        # Make sure units are consistent
        # also get the dimensionality now since it is much faster to feed
        # that to Quantity rather than a unit
        if units is None:
            # No keyword units, so get from `times`
            try:
                dim = times.units.dimensionality
            except AttributeError:
                raise ValueError('you must specify units')
        else:
            if hasattr(units, 'dimensionality'):
                dim = units.dimensionality
            else:
                dim = pq.quantity.validate_dimensionality(units)

            if hasattr(times, 'dimensionality'):
                if times.dimensionality.items() == dim.items():
                    units = None  # units will be taken from times, avoids copying
                else:
                    if not copy:
                        raise ValueError("cannot rescale and return view")
                    else:
                        # this is needed because of a bug in python-quantities
                        # see issue # 65 in python-quantities github
                        # remove this if it is fixed
                        times = times.rescale(dim)

        if dtype is None:
            if not hasattr(times, 'dtype'):
                dtype = np.float64
        elif hasattr(times, 'dtype') and times.dtype != dtype:
            if not copy:
                raise ValueError("cannot change dtype and return view")

            # if t_start.dtype or t_stop.dtype != times.dtype != dtype,
            # _check_time_in_range can have problems, so we set the t_start
            # and t_stop dtypes to be the same as times before converting them
            # to dtype below
            # see ticket #38
            if hasattr(t_start, 'dtype') and t_start.dtype != times.dtype:
                t_start = t_start.astype(times.dtype)
            if hasattr(t_stop, 'dtype') and t_stop.dtype != times.dtype:
                t_stop = t_stop.astype(times.dtype)

        # check to make sure the units are time
        # this approach is orders of magnitude faster than comparing the
        # reference dimensionality
        if (len(dim) != 1 or list(dim.values())[0] != 1
                or not isinstance(list(dim.keys())[0], pq.UnitTime)):
            ValueError("Unit has dimensions %s, not [time]" % dim.simplified)

        # Construct Quantity from data
        obj = pq.Quantity(times, units=units, dtype=dtype, copy=copy).view(cls)

        # if the dtype and units match, just copy the values here instead
        # of doing the much more expensive creation of a new Quantity
        # using items() is orders of magnitude faster
        if (hasattr(t_start, 'dtype') and t_start.dtype == obj.dtype
                and hasattr(t_start, 'dimensionality')
                and t_start.dimensionality.items() == dim.items()):
            obj.t_start = t_start.copy()
        else:
            obj.t_start = pq.Quantity(t_start, units=dim, dtype=obj.dtype)

        if (hasattr(t_stop, 'dtype') and t_stop.dtype == obj.dtype
                and hasattr(t_stop, 'dimensionality')
                and t_stop.dimensionality.items() == dim.items()):
            obj.t_stop = t_stop.copy()
        else:
            obj.t_stop = pq.Quantity(t_stop, units=dim, dtype=obj.dtype)

        # Store attributes
        obj.waveforms = waveforms
        obj.left_sweep = left_sweep
        obj.sampling_rate = sampling_rate

        # parents
        obj.segment = None
        obj.unit = None

        # Error checking (do earlier?)
        _check_time_in_range(obj, obj.t_start, obj.t_stop, view=True)

        return obj
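The unit-handling branches above can be exercised directly; a small sketch (units are either inferred from the times Quantity or passed explicitly):

import numpy as np
import quantities as pq

# units taken from the `times` Quantity itself
st1 = SpikeTrain(np.array([1., 2., 3.]) * pq.ms, t_stop=10. * pq.ms)

# units given explicitly for a plain sequence
st2 = SpikeTrain([1., 2., 3.], units='ms', t_stop=10. * pq.ms)

# a plain sequence without units raises ValueError('you must specify units')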
Example #9
 def __setslice__(self, i, j, value):
     if not hasattr(value, "units"):
         value = pq.Quantity(value, units=self.units)
     _check_time_in_range(value, self.t_start, self.t_stop)
     super(SpikeTrain, self).__setslice__(i, j, value)
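__setslice__ only exists on Python 2; on Python 3, slice assignment goes through __setitem__, which receives a slice object instead. A sketch of the equivalent hook (not necessarily the verbatim neo implementation):

 def __setitem__(self, i, value):
     # on Python 3, slice assignment arrives here; apply the same range check
     if not hasattr(value, "units"):
         value = pq.Quantity(value, units=self.units)
     _check_time_in_range(value, self.t_start, self.t_stop)
     super(SpikeTrain, self).__setitem__(i, value)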
Example #10
 def test_cv2_with_quantities(self):
     seq = pq.Quantity(self.test_seq, units='ms')
     assert_array_almost_equal(statistics.cv2(seq), self.target, decimal=9)
Example #11
 def test_isi_with_quantities_1d(self):
     st = pq.Quantity(self.test_array_1d, units='ms')
     target = pq.Quantity(self.targ_array_1d, 'ms')
     res = statistics.isi(st)
     assert_array_almost_equal(res, target, decimal=9)
Example #12
 def test_mean_firing_rate_with_quantities_1d(self):
     st = pq.Quantity(self.test_array_1d, units='ms')
     target = pq.Quantity(self.targ_array_1d / self.max_array_1d, '1/ms')
     res = statistics.mean_firing_rate(st)
     assert_array_almost_equal(res, target, decimal=9)
Example #13
 def test_mean_firing_rate_with_spiketrain(self):
     st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
     target = pq.Quantity(self.targ_array_1d / 10., '1/ms')
     res = statistics.mean_firing_rate(st)
     assert_array_almost_equal(res, target, decimal=9)
Example #14
#~ mapperInfo = open_db(url, myglobals = globals(), )

mapperInfo = open_db(url,
                     myglobals=globals(),
                     numpy_storage_engine='hdf5',
                     hdf5_filename=hdf5_filename)

seg = Segment(name='first seg')
seg.save()
for i in range(3):
    ana = AnalogSignal()
    seg.analogsignals.append(ana)
    ana.save()
    ana.name = 'from attr {}'.format(i)
    ana.signal = pq.Quantity([1., 2., 3.], units='mV')
    ana.t_start = 50.23 * pq.s
    ana.sampling_rate = 1000. * pq.Hz
    ana.save()

for i in range(1):
    ana = AnalogSignal(
        signal=pq.Quantity([1., 2., 3.], units='mV'),
        t_start=15.654654 * pq.s,
        sampling_rate=10 * pq.Hz,
        name='ini init {}'.format(i),
    )
    ana.signal[1] = 4 * pq.mV
    seg.analogsignals.append(ana)

for i in range(2):
Example #15
 def test___get_sampling_rate__period_rate_not_equivalent_ValueError(self):
     sampling_rate = pq.Quantity(10., units=pq.Hz)
     sampling_period = pq.Quantity(10, units=pq.s)
     self.assertRaises(ValueError, _get_sampling_rate,
                       sampling_rate, sampling_period)
Example #16
    def format_data(self, data):
        """
        This accepts data input in the form:
        ***** (observation) *****
        {   "AA":{
                "SO": {"mean": "X0"},
                "SP": {"mean": "X1"},
                "SR": {"mean": "X2"},
                "SLM":{"mean": "X3"}
            },
            "BP": {...},
            "BS": {...},
            "CCKBC":{...},
            "Ivy":{...},
            "OLM":{...},
            "PC":{...},
            "PPA":{...},
            "SCA":{...},
            "Tri":{...}
        }
        ***** (prediction) *****
        {   "AA":{
                "SO": {"value": "X0"},
                "SP": {"value": "X1"},
                "SR": {"value": "X2"},
                "SLM":{"value": "X3"},
                "OUT":{"value": "X4"}
            },
            "BP": {...},
            "BS": {...},
            "CCKBC":{...},
            "Ivy":{...},
            "OLM":{...},
            "PC":{...},
            "PPA":{...},
            "SCA":{...},
            "Tri":{...}
        }
        Returns a new dictionary of the form
        { "AA":[X0, X1, X2, X3, X4], "BP":[...] , "BS":[...], "CCKBC":[...], "Ivy":[...], "OLM":[...],
        "PC":[...], "PPA":[...], "SCA":[...], "Tri":[...] }
        """

        data_new_dict = dict()
        # dict0: a dictionary containing the synapses fraction in each of the
        # hippocampus CA1 layers (SO, SP, SR, SLM) and OUT (prediction data
        # only) for each m-type cell (AA, BP, BS, CCKBC, Ivy, OLM, PC, PPA,
        # SCA, Tri)
        for key0, dict0 in data.items():
            data_list_1 = list()
            for dict1 in dict0.values():
                # dict1: {"mean": "X0"} (observation) or {"value": "X"}
                # (prediction)
                try:
                    synapses_fraction = float(list(dict1.values())[0])
                    assert synapses_fraction <= 1.0
                    data_list_1.append(synapses_fraction)
                except Exception:
                    raise sciunit.Error(
                        "Values not in appropriate format. The synapses "
                        "fraction of an m-type cell must be dimensionless "
                        "and not larger than 1.0")

            if "out" not in [x.lower() for x in dict0.keys()]:
                # observation data: pad the missing OUT entry
                data_list_1.append(0.0)
            data_list_1_q = quantities.Quantity(data_list_1, self.units)
            data_new_dict[key0] = data_list_1_q

        return data_new_dict
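A small, self-contained illustration of the reshaping (hedged: `test` is a hypothetical instance of the sciunit test class defining this method, and the values are made up; only one m-type is shown):

observation = {
    "AA": {
        "SO":  {"mean": "0.2"},
        "SP":  {"mean": "0.3"},
        "SR":  {"mean": "0.4"},
        "SLM": {"mean": "0.1"},
    }
}
formatted = test.format_data(observation)
# no "OUT" key, so a trailing 0.0 is appended:
# formatted["AA"] -> Quantity([0.2, 0.3, 0.4, 0.1, 0.0], units=test.units)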
Example #17
 def times(self):
     return pq.Quantity(self)
Example #18
    def read_block(
        self,
        lazy=False,
        cascade=True,
    ):
        bl = Block(file_origin=os.path.basename(self.filename), )
        if not cascade:
            return bl

        fid = open(self.filename, 'rb')

        headertext = fid.read(1024)
        if PY3K:
            headertext = headertext.decode('ascii')
        header = {}
        for line in headertext.split('\r\n'):
            if '=' not in line:
                continue
            key, val = line.split('=')
            if key in [
                    'NC',
                    'NR',
                    'NBH',
                    'NBA',
                    'NBD',
                    'ADCMAX',
                    'NP',
                    'NZ',
            ]:
                val = int(val)
            elif key in [
                    'AD',
                    'DT',
            ]:
                val = val.replace(',', '.')
                val = float(val)
            header[key] = val

        SECTORSIZE = 512
        # loop for record number
        for i in range(header['NR']):
            offset = 1024 + i * (SECTORSIZE * header['NBD'] + 1024)

            # read analysis zone
            analysisHeader = HeaderReader(
                fid, AnalysisDescription).read_f(offset=offset)

            # read data
            NP = (SECTORSIZE * header['NBD']) // 2
            NP = NP - NP % header['NC']
            NP = NP // header['NC']
            if not lazy:
                data = np.memmap(
                    self.filename,
                    np.dtype('i2'),
                    'r',
                    #shape = (header['NC'], header['NP']) ,
                    shape=(
                        NP,
                        header['NC'],
                    ),
                    offset=offset + header['NBA'] * SECTORSIZE)

            # create a segment
            seg = Segment()
            bl.segments.append(seg)

            for c in range(header['NC']):

                unit = header['YU%d' % c]
                try:
                    unit = pq.Quantity(1., unit)
                except Exception:
                    unit = pq.Quantity(1., '')

                if lazy:
                    signal = [] * unit
                else:
                    YG = float(header['YG%d' % c].replace(',', '.'))
                    ADCMAX = header['ADCMAX']
                    VMax = analysisHeader['VMax'][c]
                    signal = data[:, header['YO%d' % c]].astype(
                        'f4') * VMax / ADCMAX / YG * unit
                anaSig = AnalogSignal(
                    signal,
                    sampling_rate=pq.Hz / analysisHeader['SamplingInterval'],
                    t_start=analysisHeader['TimeRecorded'] * pq.s,
                    name=header['YN%d' % c],
                    channel_index=c)

                if lazy:
                    anaSig.lazy_shape = NP
                seg.analogsignals.append(anaSig)

        fid.close()

        bl.create_many_to_one_relationship()
        return bl
Example #19
    def read_one_channel_event_or_spike(self,
                                        fid,
                                        channel_num,
                                        header,
                                        lazy=True):
        # returns a SpikeTrain or an Event
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        # # Step 1 : type of blocks
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            #  RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first read for allocating mem
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea

            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid,
                                           np.dtype(blockHeaderDesciption))
                # read all events in block
                trigs = np.frombuffer(fid.read(blockHeader.items *
                                               dt.itemsize),
                                      dtype=dt)

                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to standard neo classes: Event or SpikeTrain
            alltimes = alltrigs['tick'].astype(
                'f') * header.us_per_time * header.dtime_base * pq.s

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                #events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # the Spike2 marker is closest to the 'label' concept in neo
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains

                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.frombuffer(alltrigs['adc'].tobytes(),
                                              dtype='i2')
                    waveforms = waveforms.astype(
                        'f4') * channelHeader.scale / 6553.6 + \
                        channelHeader.offset
                elif channelHeader.kind == 7:
                    waveforms = np.frombuffer(alltrigs['real'].tobytes(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time * header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # can get better value from associated AnalogSignal(s) ?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) *
                                      pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(
                        alltimes[alltrigs['marker'] == i],
                        waveforms=waveforms[alltrigs['marker'] == i] * unit,
                        sampling_rate=(1. / sample_interval) * pq.Hz,
                        t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)

                return sptrs
Example #20
 def test_quantities_array_uint(self):
     '''test to make sure uint type quantities arrays are accepted'''
     value = pq.Quantity([1, 2, 3, 4, 5], dtype=np.uint, units=pq.meter)
     self.base.annotate(data=value)
     result = {'data': value}
     self.assertDictEqual(result, self.base.annotations)
Example #21
def test_lcc25_maxvoltage():
    with expected_protocol(ik.thorlabs.LCC25, ["max?", "max=10.0"],
                           ["max?", "20", ">max=10.0", ">"],
                           sep="\r") as lcc:
        unit_eq(lcc.max_voltage, pq.Quantity(20, "V"))
        lcc.max_voltage = 10.0
Example #22
 def test_quantities_scalar_str(self):
     '''test to make sure str type quantities scalars are accepted'''
     value = pq.Quantity(99, dtype=np.str_, units=pq.meter)
     self.base.annotate(data=value)
     result = {'data': value}
     self.assertDictEqual(result, self.base.annotations)
Example #23
def test_lcc25_increment():
    with expected_protocol(ik.thorlabs.LCC25, ["increment?", "increment=10.0"],
                           ["increment?", "20", ">increment=10.0", ">"],
                           sep="\r") as lcc:
        unit_eq(lcc.increment, pq.Quantity(20, "V"))
        lcc.increment = 10.0
Example #24
 def test___get_sampling_rate__period_quant_rate_none(self):
     sampling_rate = None
     sampling_period = pq.Quantity(10., units=pq.s)
     targ_rate = 1/sampling_period
     out_rate = _get_sampling_rate(sampling_rate, sampling_period)
     self.assertEqual(targ_rate, out_rate)
Example #25
def cross_correlation_histogram(binned_spiketrain_i,
                                binned_spiketrain_j,
                                window='full',
                                border_correction=False,
                                binary=False,
                                kernel=None,
                                method='speed',
                                cross_correlation_coefficient=False):
    """
    Computes the cross-correlation histogram (CCH) between two binned spike
    trains `binned_spiketrain_i` and `binned_spiketrain_j`.

    Visualization of this function is covered in Viziphant:
    :func:`viziphant.spike_train_correlation.plot_cross_correlation_histogram`.

    Parameters
    ----------
    binned_spiketrain_i, binned_spiketrain_j : BinnedSpikeTrain
        Binned spike trains of lengths N and M to cross-correlate - the output
        of :class:`elephant.conversion.BinnedSpikeTrain`. The input
        spike trains can have any `t_start` and `t_stop`.
    window : {'valid', 'full'} or list of int, optional
        'full': This returns the cross-correlation at each point of overlap,
                with an output shape of (N+M-1,). At the end-points of the
                cross-correlogram, the signals do not overlap completely, and
                boundary effects may be seen.
        'valid': Mode 'valid' returns output of length max(M, N) - min(M, N) + 1.
                 The cross-correlation product is only given for points where
                 the signals overlap completely.
                 Values outside the signal boundary have no effect.
        List of integers (min_lag, max_lag):
              The entries of window are two integers representing the left and
              right extremes (expressed as number of bins) where the
              cross-correlation is computed.
        Default: 'full'
    border_correction : bool, optional
        whether to correct for the border effect. If True, the value of the
        CCH at bin :math:`b` (for :math:`b=-H,-H+1, ...,H`, where :math:`H` is
        the CCH half-length) is multiplied by the correction factor:

        .. math::
                            (H+1)/(H+1-|b|),

        which linearly corrects for loss of bins at the edges.
        Default: False
    binary : bool, optional
        If True, spikes falling in the same bin are counted as a single spike;
        otherwise they are counted as different spikes.
        Default: False
    kernel : np.ndarray or None, optional
        A one dimensional array containing a smoothing kernel applied
        to the resulting CCH. The length N of the kernel indicates the
        smoothing window. The smoothing window cannot be larger than the
        maximum lag of the CCH. The kernel is normalized to unit area before
        being applied to the resulting CCH. Popular choices for the kernel are
          * normalized boxcar kernel: `numpy.ones(N)`
          * hamming: `numpy.hamming(N)`
          * hanning: `numpy.hanning(N)`
          * bartlett: `numpy.bartlett(N)`
        If None, the CCH is not smoothed.
        Default: None
    method : {'speed', 'memory'}, optional
        Defines the algorithm to use. "speed" uses `numpy.correlate` to
        calculate the correlation between two binned spike trains using a
        non-sparse data representation. Due to various optimizations, it is the
        fastest realization. In contrast, the option "memory" uses its own
        implementation to calculate the correlation based on sparse matrices,
        which is more memory efficient but slower than the "speed" option.
        Default: "speed"
    cross_correlation_coefficient : bool, optional
        If True, a normalization is applied to the CCH to obtain the
        cross-correlation  coefficient function ranging from -1 to 1 according
        to Equation (5.10) in :cite:`correlation-Eggermont2010_77`. See Notes.
        Default: False

    Returns
    -------
    cch_result : neo.AnalogSignal
        Containing the cross-correlation histogram between
        `binned_spiketrain_i` and `binned_spiketrain_j`.

        Offset bins correspond to correlations at delays equivalent
        to the differences between the spike times of `binned_spiketrain_i` and
        those of `binned_spiketrain_j`: an entry at positive lag corresponds to
        a spike in `binned_spiketrain_j` following a spike in
        `binned_spiketrain_i` bins to the right, and an entry at negative lag
        corresponds to a spike in `binned_spiketrain_i` following a spike in
        `binned_spiketrain_j`.

        To illustrate this definition, consider two spike trains with the same
        `t_start` and `t_stop`:
        `binned_spiketrain_i` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0
        `binned_spiketrain_j` ('target neuron')    : 0 0 0 0 0 0 0 1 0 0 0
        Here, the CCH will have an entry of `1` at `lag=+3`.

        Consistent with the definition of `neo.AnalogSignals`, the time axis
        represents the left bin borders of each histogram bin. For example,
        the time axis might be:
        `np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms`
    lags : np.ndarray
        Contains the IDs of the individual histogram bins, where the central
        bin has ID 0, bins to the left have negative IDs and bins to the right
        have positive IDs, e.g.,:
        `np.array([-3, -2, -1, 0, 1, 2, 3])`

    Notes
    -----
    1. The Eq. (5.10) in :cite:`correlation-Eggermont2010_77` is valid for
       binned spike trains with at most one spike per bin. For a general case,
       refer to the implementation of `_covariance_sparse()`.
    2. Alias: `cch`

    Examples
    --------
    Plot the cross-correlation histogram between two Poisson spike trains

    >>> import elephant
    >>> import quantities as pq
    >>> import numpy as np
    >>> from elephant.conversion import BinnedSpikeTrain
    >>> from elephant.spike_train_generation import homogeneous_poisson_process
    >>> from elephant.spike_train_correlation import \
    ... cross_correlation_histogram

    >>> np.random.seed(1)
    >>> binned_spiketrain_i = BinnedSpikeTrain(
    ...        homogeneous_poisson_process(
    ...            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
    ...        bin_size=5. * pq.ms)
    >>> binned_spiketrain_j = BinnedSpikeTrain(
    ...        homogeneous_poisson_process(
    ...            10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
    ...        bin_size=5. * pq.ms)

    >>> cc_hist, lags = cross_correlation_histogram(
    ...        binned_spiketrain_i, binned_spiketrain_j, window=[-10, 10],
    ...        border_correction=False,
    ...        binary=False, kernel=None)
    >>> print(cc_hist.flatten())
    [ 5.  3.  3.  2.  4.  0.  1.  5.  3.  4.  2.  2.  2.  5.
      1.  2.  4.  2. -0.  3.  3.] dimensionless
    >>> lags
    array([-10,  -9,  -8,  -7,  -6,  -5,  -4,  -3,  -2,  -1,
         0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
        10], dtype=int32)

    """

    # Check that the spike trains are binned with the same temporal
    # resolution
    if binned_spiketrain_i.shape[0] != 1 or \
            binned_spiketrain_j.shape[0] != 1:
        raise ValueError("Spike trains must be one dimensional")

    # rescale to the common units
    # this does not change the data - only its representation
    binned_spiketrain_j.rescale(binned_spiketrain_i.units)

    if not np.isclose(binned_spiketrain_i._bin_size,
                      binned_spiketrain_j._bin_size):
        raise ValueError("Bin sizes must be equal")

    bin_size = binned_spiketrain_i._bin_size
    left_edge_min = -binned_spiketrain_i.n_bins + 1
    right_edge_max = binned_spiketrain_j.n_bins - 1

    t_lags_shift = (binned_spiketrain_j._t_start -
                    binned_spiketrain_i._t_start) / bin_size
    if not np.isclose(t_lags_shift, round(t_lags_shift)):
        # For example, if bin_size=1 ms, binned_spiketrain_i.t_start=0 ms, and
        # binned_spiketrain_j.t_start=0.5 ms then there is a global shift in
        # the binning of the spike trains.
        raise ValueError(
            "Binned spiketrains time shift is not multiple of bin_size")
    t_lags_shift = int(round(t_lags_shift))

    # In the examples below we fix st2 and "move" st1.
    # Zero-lag is equal to `max(st1.t_start, st2.t_start)`.
    # Binned spiketrains (t_start and t_stop) with bin_size=1ms:
    # 1) st1=[3, 8] ms, st2=[1, 13] ms
    #    t_start_shift = -2 ms
    #    zero-lag is at 3 ms
    # 2) st1=[1, 7] ms, st2=[2, 9] ms
    #    t_start_shift = 1 ms
    #    zero-lag is at 2 ms
    # 3) st1=[1, 7] ms, st2=[4, 6] ms
    #    t_start_shift = 3 ms
    #    zero-lag is at 4 ms

    # Find left and right edges of unaligned (time-dropped) time signals
    if len(window) == 2 and np.issubdtype(type(window[0]), np.integer) \
            and np.issubdtype(type(window[1]), np.integer):
        # ex. 1) lags range: [w[0] - 2, w[1] - 2] ms
        # ex. 2) lags range: [w[0] + 1, w[1] + 1] ms
        # ex. 3) lags range: [w[0] + 3, w[0] + 3] ms
        if window[0] >= window[1]:
            raise ValueError(
                "Window's left edge ({left}) must be lower than the right "
                "edge ({right})".format(left=window[0], right=window[1]))
        left_edge, right_edge = np.subtract(window, t_lags_shift)
        if left_edge < left_edge_min or right_edge > right_edge_max:
            raise ValueError(
                "The window exceeds the length of the spike trains")
        lags = np.arange(window[0], window[1] + 1, dtype=np.int32)
        cch_mode = 'pad'
    elif window == 'full':
        # cch computed for all the possible entries
        # ex. 1) lags range: [-6, 9] ms
        # ex. 2) lags range: [-4, 7] ms
        # ex. 3) lags range: [-2, 4] ms
        left_edge = left_edge_min
        right_edge = right_edge_max
        lags = np.arange(left_edge + t_lags_shift,
                         right_edge + 1 + t_lags_shift,
                         dtype=np.int32)
        cch_mode = window
    elif window == 'valid':
        lags = _CrossCorrHist.get_valid_lags(binned_spiketrain_i,
                                             binned_spiketrain_j)
        left_edge, right_edge = lags[(0, -1), ]
        cch_mode = window
    else:
        raise ValueError("Invalid window parameter")

    if binary:
        binned_spiketrain_i = binned_spiketrain_i.binarize()
        binned_spiketrain_j = binned_spiketrain_j.binarize()

    cch_builder = _CrossCorrHist(binned_spiketrain_i,
                                 binned_spiketrain_j,
                                 window=(left_edge, right_edge))
    if method == 'memory':
        cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode)
    else:
        cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode)

    if border_correction:
        if window == 'valid':
            warnings.warn(
                "Border correction does not have any effect in "
                "'valid' window mode since there are no border effects!")
        else:
            cross_corr = cch_builder.border_correction(cross_corr)
    if kernel is not None:
        cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel)
    if cross_correlation_coefficient:
        cross_corr = cch_builder.cross_correlation_coefficient(cross_corr)

    normalization = 'normalized' if cross_correlation_coefficient else 'counts'
    annotations = dict(window=window,
                       border_correction=border_correction,
                       binary=binary,
                       kernel=kernel is not None,
                       normalization=normalization)
    annotations = dict(cch_parameters=annotations)

    # Transform the array count into an AnalogSignal
    t_start = pq.Quantity((lags[0] - 0.5) * bin_size,
                          units=binned_spiketrain_i.units,
                          copy=False)
    cch_result = neo.AnalogSignal(signal=np.expand_dims(cross_corr, axis=1),
                                  units=pq.dimensionless,
                                  t_start=t_start,
                                  sampling_period=binned_spiketrain_i.bin_size,
                                  copy=False,
                                  **annotations)
    return cch_result, lags
Example #26
 def test___get_sampling_rate__period_none_rate_quant(self):
     sampling_rate = pq.Quantity(10., units=pq.Hz)
     sampling_period = None
     targ_rate = sampling_rate
     out_rate = _get_sampling_rate(sampling_rate, sampling_period)
     self.assertEqual(targ_rate, out_rate)
Example #27
def zscore(signal, inplace=True):
    '''
    Apply a z-score operation to one or several AnalogSignalArray objects.

    The z-score operation subtracts the mean :math:`\\mu` of the signal, and
    divides by its standard deviation :math:`\\sigma`:

    .. math::
         Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}

    If an AnalogSignalArray containing multiple signals is provided, the
    z-transform is always calculated for each signal individually.

    If a list of AnalogSignalArray objects is supplied, the mean and standard
    deviation are calculated across all objects of the list. Thus, all list
    elements are z-transformed by the same values of :math:`\\mu` and
    :math:`\\sigma`. For AnalogSignalArrays, each signal of the array is
    treated separately across list elements. Therefore, the number of signals
    must be identical for each AnalogSignalArray of the list.

    Parameters
    ----------
    signal : neo.AnalogSignalArray or list of neo.AnalogSignalArray
        Signals for which to calculate the z-score.
    inplace : bool
        If True, the contents of the input signal(s) is replaced by the
        z-transformed signal. Otherwise, a copy of the original
        AnalogSignalArray(s) is returned. Default: True

    Returns
    -------
    neo.AnalogSignalArray or list of neo.AnalogSignalArray
        The output format matches the input format: for each supplied
        AnalogSignalArray object a corresponding object is returned containing
        the z-transformed signal with the unit dimensionless.

    Use Case
    --------
    You may supply a list of AnalogSignalArray objects, where each object in
    the list contains the data of one trial of the experiment, and each signal
    of the AnalogSignalArray corresponds to the recordings from one specific
    electrode in a particular trial. In this scenario, you will z-transform the
    signal of each electrode separately, but transform all trials of a given
    electrode in the same way.

    Examples
    --------
    >>> a = neo.AnalogSignalArray(
    ...       np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1)*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> b = neo.AnalogSignalArray(
    ...       np.transpose([[1, 2, 3, 4, 5, 6], [11, 12, 13, 14, 15, 16]])*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> c = neo.AnalogSignalArray(
    ...       np.transpose([[21, 22, 23, 24, 25, 26], [31, 32, 33, 34, 35, 36]])*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> print(zscore(a))
    [[-1.46385011]
     [-0.87831007]
     [-0.29277002]
     [ 0.29277002]
     [ 0.87831007]
     [ 1.46385011]] dimensionless

    >>> print(zscore(b))
    [[-1.46385011 -1.46385011]
     [-0.87831007 -0.87831007]
     [-0.29277002 -0.29277002]
     [ 0.29277002  0.29277002]
     [ 0.87831007  0.87831007]
     [ 1.46385011  1.46385011]] dimensionless

    >>> print(zscore([b,c]))
    [<AnalogSignalArray(array([[-1.11669108, -1.08361877],
       [-1.0672076 , -1.04878252],
       [-1.01772411, -1.01394628],
       [-0.96824063, -0.97911003],
       [-0.91875714, -0.94427378],
       [-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
       sampling rate: 1000.0 Hz)>,
       <AnalogSignalArray(array([[ 0.78170952,  0.84779261],
       [ 0.86621866,  0.90728682],
       [ 0.9507278 ,  0.96678104],
       [ 1.03523694,  1.02627526],
       [ 1.11974608,  1.08576948],
       [ 1.20425521,  1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
       sampling rate: 1000.0 Hz)>]
    '''
    # Transform input to a list
    if not isinstance(signal, list):
        signal = [signal]

    # Calculate mean and standard deviation
    m = np.mean(np.concatenate(signal), axis=0, keepdims=True)
    s = np.std(np.concatenate(signal), axis=0, keepdims=True)

    if not inplace:
        # Create new signal instance
        result = []
        for sig in signal:
            sig_dimless = sig.duplicate_with_new_array(
                (sig.magnitude - m.magnitude) / s.magnitude) / sig.units
            result.append(sig_dimless)
    else:
        result = []
        # Overwrite signal
        for sig in signal:
            sig[:] = pq.Quantity(
                (sig.magnitude - m.magnitude) / s.magnitude,
                units=sig.units)
            sig_dimless = sig / sig.units
            result.append(sig_dimless)
    # Return single object, or list of objects
    if len(result) == 1:
        return result[0]
    else:
        return result
Example #28
 def test___get_sampling_rate__period_rate_equivalent(self):
     sampling_rate = pq.Quantity(10., units=pq.Hz)
     sampling_period = pq.Quantity(0.1, units=pq.s)
     targ_rate = sampling_rate
     out_rate = _get_sampling_rate(sampling_rate, sampling_period)
     self.assertEqual(targ_rate, out_rate)
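Taken together, the _get_sampling_rate tests in this collection (Examples #15, #24, #26 and #28) pin down the helper's contract. A minimal sketch consistent with those tests (an assumption, not the verbatim neo implementation):

def _get_sampling_rate(sampling_rate, sampling_period):
    # at most one argument may be None; if both are given they must agree
    if sampling_period is None:
        if sampling_rate is None:
            raise ValueError("You must provide either the sampling rate or "
                             "the sampling period")
    elif sampling_rate is None:
        sampling_rate = 1.0 / sampling_period
    elif sampling_period != 1.0 / sampling_rate:
        raise ValueError("The sampling_rate has to be 1/sampling_period")
    return sampling_rate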
Example #29
def unitIsValid(unit):
    try:
        pq.Quantity(1, unit)
    except Exception:
        return False
    return True
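For example (a hedged sketch of the expected behavior):

>>> unitIsValid('mV')
True
>>> unitIsValid('not-a-unit')
False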
Example #30
 def _read_analogsignal_t_start(self, attrs, data_group):
     t_start = float(data_group.attrs['tstart']) * pq.Quantity(
         1, data_group.attrs['tunit'])
     t_start = t_start.rescale(attrs['t_start_unit'])
     return t_start