Example #1
    def test_read_integer(self):
        """
        Tests that spike times are stored as integers when the file records
        them in time steps.
        """
        filename = self.get_local_path('nest/0time_in_steps-1257-0.gdf')
        r = NestIO(filenames=filename)
        st = r.read_spiketrain(gdf_id=None, t_start=400. * pq.ms,
                               t_stop=500. * pq.ms,
                               time_unit=pq.CompoundUnit('0.1*ms'),
                               lazy=False, id_column=None, time_column=0)
        self.assertTrue(st.magnitude.dtype == np.int32)
        seg = r.read_segment(gid_list=[None], t_start=400. * pq.ms,
                             t_stop=500. * pq.ms,
                             time_unit=pq.CompoundUnit('0.1*ms'),
                             lazy=False, id_column_gdf=None, time_column_gdf=0)
        sts = seg.spiketrains
        self.assertTrue(all([st.magnitude.dtype == np.int32 for st in sts]))

        filename = self.get_local_path('nest/0gid-1time_in_steps-1258-0.gdf')
        r = NestIO(filenames=filename)
        st = r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms,
                               t_stop=500. * pq.ms,
                               time_unit=pq.CompoundUnit('0.1*ms'),
                               lazy=False, id_column=0, time_column=1)
        self.assertTrue(st.magnitude.dtype == np.int32)
        seg = r.read_segment(gid_list=[1], t_start=400. * pq.ms,
                             t_stop=500. * pq.ms,
                             time_unit=pq.CompoundUnit('0.1*ms'),
                             lazy=False, id_column_gdf=0, time_column_gdf=1)
        sts = seg.spiketrains
        self.assertTrue(all([st.magnitude.dtype == np.int32 for st in sts]))
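The integer dtype survives because pq.CompoundUnit('0.1*ms') lets the step counts themselves be the stored magnitudes; floats only appear on rescaling. A minimal sketch of that behaviour (the step values are made up):

import numpy as np
import quantities as pq

# spike times recorded as integer step counts, one step = 0.1 ms
steps = np.array([4000, 4500], dtype=np.int32)
times = pq.Quantity(steps, pq.CompoundUnit('0.1*ms'))
assert times.magnitude.dtype == np.int32
print(times.rescale(pq.ms))  # -> [400. 450.] ms, floats only after rescaling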
Example #2
    def test_read_analogsignal(self):
        """
        Tests reading files in the 2 different formats:
        - with GIDs, with times as floats
        - with GIDs, with times as integers in time steps
        """

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time-2gex-3Vm-1261-0.dat',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_analogsignal(gid=1, t_stop=1000. * pq.ms,
                            sampling_period=pq.ms, lazy=False,
                            id_column=0, time_column=1,
                            value_column=2, value_type='V_m')
        r.read_segment(gid_list=[1], t_stop=1000. * pq.ms,
                       sampling_period=pq.ms, lazy=False, id_column_dat=0,
                       time_column_dat=1, value_columns_dat=2,
                       value_types='V_m')

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time_in_steps-2Vm-1263-0.dat',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_analogsignal(gid=1, t_stop=1000. * pq.ms,
                            time_unit=pq.CompoundUnit('0.1*ms'),
                            sampling_period=pq.ms, lazy=False,
                            id_column=0, time_column=1,
                            value_column=2, value_type='V_m')
        r.read_segment(gid_list=[1], t_stop=1000. * pq.ms,
                       time_unit=pq.CompoundUnit('0.1*ms'),
                       sampling_period=pq.ms, lazy=False, id_column_dat=0,
                       time_column_dat=1, value_columns_dat=2,
                       value_types='V_m')

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time-2Vm-1259-0.dat',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_analogsignal(gid=1, t_stop=1000. * pq.ms,
                            time_unit=pq.CompoundUnit('0.1*ms'),
                            sampling_period=pq.ms, lazy=False,
                            id_column=0, time_column=1,
                            value_column=2, value_type='V_m')
        r.read_segment(gid_list=[1], t_stop=1000. * pq.ms,
                       time_unit=pq.CompoundUnit('0.1*ms'),
                       sampling_period=pq.ms, lazy=False, id_column_dat=0,
                       time_column_dat=1, value_columns_dat=2,
                       value_types='V_m')
Example #3
 def test_regression_269(self):
     # This is a spike train sampled at 30 kHz: one spike at 1 s, one just
     # before the end of the signal
     cu = pq.CompoundUnit("1/30000.*s")
     st = SpikeTrain(
         [30000., (self.anasig0.t_stop - 1 * pq.s).rescale(cu).magnitude],
         units=pq.CompoundUnit("1/30000.*s"),
         t_start=-1 * pq.s, t_stop=300 * pq.s)
     phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
         elephant.signal_processing.hilbert(self.anasig0),
         st,
         interpolate=False)
     self.assertEqual(len(phases_noint[0]), 2)
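The compound unit expresses spike times directly in 30 kHz sample counts; a quick check of that equivalence:

import quantities as pq

cu = pq.CompoundUnit("1/30000.*s")
print((30000 * cu).rescale(pq.s))  # -> 1.0 s: sample 30000 at 30 kHz is one second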
Example #4
    def test_read_spiketrain(self):
        """
        Tests reading files in the 4 different formats:
        - without GIDs, with times as floats
        - without GIDs, with times as integers in time steps
        - with GIDs, with times as floats
        - with GIDs, with times as integers in time steps
        """
        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0time-1255-0.gdf',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_spiketrain(t_start=400. * pq.ms, t_stop=500. * pq.ms, lazy=False,
                          id_column=None, time_column=0)
        r.read_segment(t_start=400. * pq.ms, t_stop=500. * pq.ms, lazy=False,
                       id_column_gdf=None, time_column_gdf=0)

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0time_in_steps-1257-0.gdf',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_spiketrain(t_start=400. * pq.ms, t_stop=500. * pq.ms,
                          time_unit=pq.CompoundUnit('0.1*ms'), lazy=False,
                          id_column=None, time_column=0)
        r.read_segment(t_start=400. * pq.ms, t_stop=500. * pq.ms,
                       time_unit=pq.CompoundUnit('0.1*ms'), lazy=False,
                       id_column_gdf=None, time_column_gdf=0)

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time-1256-0.gdf',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms, t_stop=500. * pq.ms,
                          lazy=False, id_column_gdf=0, time_column_gdf=1)
        r.read_segment(gid_list=[1], t_start=400. * pq.ms, t_stop=500. * pq.ms,
                       lazy=False, id_column_gdf=0, time_column_gdf=1)

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time_in_steps-1258-0.gdf',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms, t_stop=500. * pq.ms,
                          time_unit=pq.CompoundUnit('0.1*ms'), lazy=False,
                          id_column=0, time_column=1)
        r.read_segment(gid_list=[1], t_start=400. * pq.ms, t_stop=500. * pq.ms,
                       time_unit=pq.CompoundUnit('0.1*ms'), lazy=False,
                       id_column_gdf=0, time_column_gdf=1)
Example #5
def get_ORT(doc, trialid):
    """
    Returns consecutive object-release time for wanted trial

    Args:
        doc (odml.doc.BaseDocument):
            odML Document of reach-to-grasp project
        trialid (int):
            ID of wanted trial

    Returns:
        (dict):
            keys (str):
                Name of the odML section in which the 'ORT' property was found
            values (quantities.quantity.Quantity):
                object-release time for wanted trial
    """
    output = {}

    ff = lambda x: x.name == 'Periods' and \
        x.parent.name == 'Trial_%03i' % trialid
    sec = [s for s in doc.itersections(filter_func=ff)][0]

    ff = lambda x: x.name == 'ORT'
    props = [p for p in sec.iterproperties(filter_func=ff)]

    for p in props:
        output[p.parent.name] = pq.Quantity(p.value.data,
                                            pq.CompoundUnit(p.value.unit))

    return output
Example #6
    def _check_input_sampling_period(self, sampling_period, time_column,
                                     time_unit, data):
        """
        Checks sampling period, times and time unit for consistency.

        sampling_period: pq.quantity.Quantity, sampling period of data to load.
        time_column: int, column id of times in data to load.
        time_unit: pq.quantity.Quantity, unit of time used in the data to load.
        data: numpy array, the data to be loaded / interpreted.

        Returns
        pq.Quantity object, the updated sampling period.
        """
        if sampling_period is None:
            if time_column is not None:
                data_sampling = np.unique(
                    np.diff(sorted(np.unique(data[:, time_column]))))
                if len(data_sampling) > 1:
                    raise ValueError('Different sampling distances found in '
                                     'data set (%s)' % data_sampling)
                else:
                    dt = data_sampling[0]
            else:
                raise ValueError('Can not estimate sampling rate without time '
                                 'column id provided.')
            sampling_period = pq.CompoundUnit(
                str(dt) + '*' + time_unit.units.u_symbol)
        elif not isinstance(sampling_period, pq.UnitQuantity):
            raise ValueError("sampling_period is not specified as a unit.")
        return sampling_period
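A sketch of the inference branch above with made-up data: the unique difference between the sorted time stamps becomes dt, and dt plus the time unit are glued together into a CompoundUnit.

import numpy as np
import quantities as pq

# hypothetical data: the time column holds stamps on a regular 2 ms grid
data = np.array([[1, 10.], [1, 12.], [1, 14.]])
dt = np.unique(np.diff(sorted(np.unique(data[:, 1]))))[0]
sampling_period = pq.CompoundUnit('%s*ms' % dt)  # -> CompoundUnit('2.0*ms')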
Example #7
def get_AnalogEvent(doc, event, trialid):
    """
    Returns time of specified analog event for wanted trial

    Args:
        doc (odml.doc.BaseDocument):
            odML Document of reach-to-grasp project
        event (string):
            Abbreviation of wanted analog trial event
        trialid (int):
            ID of wanted trial

    Returns:
        (dict):
            keys (str):
                States which analog signal was used to calculate the wanted
                analog event ('DisplacementSignal' or 'GripForceSignals')
            values (quantities.quantity.Quantity):
                Time of wanted analog event for wanted trial for
                corresponding analog signal
    """
    output = {}

    ff = lambda x: x.name == 'AnalogEvents' and \
        x.parent.name == 'Trial_%03i' % trialid
    sec = [s for s in doc.itersections(filter_func=ff)][0]

    ff = lambda x: x.name == event
    props = [p for p in sec.iterproperties(filter_func=ff)]

    for p in props:
        output[p.parent.name] = pq.Quantity(p.value.data,
                                            pq.CompoundUnit(p.value.unit))

    return output
Example #8
def get_DigitalEvent(doc, event, trialid):
    """
    Returns time of specified digital event for wanted trial

    Args:
        doc (odml.doc.BaseDocument):
            odML Document of reach-to-grasp project
        event (string):
            Abbreviation of wanted digital trial event
        trialid (int):
            ID of wanted trial

    Returns:
        (quantities.quantity.Quantity):
            Time of wanted digital event for wanted trial
    """
    ff = lambda x: x.name == 'DigitalEvents' and \
        x.parent.name == 'Trial_%03i' % trialid
    sec = [s for s in doc.itersections(filter_func=ff)][0]

    ff = lambda x: x.name == event
    prop = [p for p in sec.iterproperties(filter_func=ff)][0]
    output = prop.value.data
    unit = prop.value.unit

    return pq.Quantity(output, pq.CompoundUnit(unit))
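The recurring pattern in these odML helpers is recombining a stored magnitude with a stored unit string via pq.Quantity and pq.CompoundUnit; the tick unit below is illustrative:

import quantities as pq

# e.g. an event time stored as 36000 ticks of 1/30000 s
print(pq.Quantity(36000, pq.CompoundUnit('1/30000.*s')).rescale(pq.s))  # -> 1.2 s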
Example #9
    def test_AnalogSignalProxy(self):
        proxy_anasig = AnalogSignalProxy(rawio=self.reader, global_channel_indexes=None,
                        block_index=0, seg_index=0,)

        assert proxy_anasig.sampling_rate == 10 * pq.kHz
        assert proxy_anasig.t_start == 0 * pq.s
        assert proxy_anasig.t_stop == 10 * pq.s
        assert proxy_anasig.duration == 10 * pq.s
        assert proxy_anasig.file_origin == 'my_filename.fake'

        # full load
        full_anasig = proxy_anasig.load(time_slice=None)
        assert isinstance(full_anasig, AnalogSignal)
        assert_same_attributes(proxy_anasig, full_anasig)

        # slice time
        anasig = proxy_anasig.load(time_slice=(2. * pq.s, 5 * pq.s))
        assert anasig.t_start == 2. * pq.s
        assert anasig.duration == 3. * pq.s
        assert anasig.shape == (30000, 16)
        assert_same_attributes(proxy_anasig.time_slice(2. * pq.s, 5 * pq.s), anasig)

        # ceil next sample when slicing
        anasig = proxy_anasig.load(time_slice=(1.99999 * pq.s, 5.000001 * pq.s))
        assert anasig.t_start == 2. * pq.s
        assert anasig.duration == 3. * pq.s
        assert anasig.shape == (30000, 16)

        # buggy time slice
        with self.assertRaises(AssertionError):
            anasig = proxy_anasig.load(time_slice=(2. * pq.s, 15 * pq.s))
        anasig = proxy_anasig.load(time_slice=(2. * pq.s, 15 * pq.s), strict_slicing=False)
        assert proxy_anasig.t_stop == 10 * pq.s

        # select channels
        anasig = proxy_anasig.load(channel_indexes=[3, 4, 9])
        assert anasig.shape[1] == 3

        # select channels and slice times
        anasig = proxy_anasig.load(time_slice=(2. * pq.s, 5 * pq.s), channel_indexes=[3, 4, 9])
        assert anasig.shape == (30000, 3)

        # magnitude mode rescaled
        anasig_float = proxy_anasig.load(magnitude_mode='rescaled')
        assert anasig_float.dtype == 'float32'
        assert anasig_float.units == pq.uV
        assert anasig_float.units == proxy_anasig.units

        # magnitude mode raw
        anasig_int = proxy_anasig.load(magnitude_mode='raw')
        assert anasig_int.dtype == 'int16'
        assert anasig_int.units == pq.CompoundUnit('0.0152587890625*uV')

        assert_arrays_almost_equal(anasig_float, anasig_int.rescale('uV'), 1e-9)

        # test array_annotations
        assert 'info' in proxy_anasig.array_annotations
        assert proxy_anasig.array_annotations['info'].size == 16
        assert 'info' in anasig_float.array_annotations
        assert anasig_float.array_annotations['info'].size == 16
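The raw/rescaled relationship asserted above comes from folding the ADC gain into a compound unit. A toy check (the gain is the one from the test, the raw count is made up):

import numpy as np
import quantities as pq

raw = pq.Quantity(np.array([100], dtype=np.int16),
                  pq.CompoundUnit('0.0152587890625*uV'))
print(raw.rescale(pq.uV))  # -> ~1.526 uV, matching magnitude_mode='rescaled'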
Example #10
    def __init__(self, rawio=None, stream_index=None, inner_stream_channels=None,
                 block_index=0, seg_index=0):
        # stream_index: index of the stream; the stream_id can be retrieved from it easily
        # inner_stream_channels: channel indexes inside the stream; None means all channels
        # if inner_stream_channels is not None:
        #     * then this is a "substream"
        #     * handle the case where channels have different units inside a stream
        #     * is related to BaseFromRaw.get_sub_signal_streams()

        self._rawio = rawio
        self._block_index = block_index
        self._seg_index = seg_index
        self._stream_index = stream_index
        if inner_stream_channels is None:
            inner_stream_channels = slice(None)
        self._inner_stream_channels = inner_stream_channels

        signal_streams = self._rawio.header['signal_streams']
        stream_id = signal_streams[stream_index]['id']
        signal_channels = self._rawio.header['signal_channels']
        global_inds, = np.nonzero(signal_channels['stream_id'] == stream_id)
        self._nb_total_chann_in_stream = global_inds.size
        self._global_channel_indexes = global_inds[inner_stream_channels]
        self._nb_chan = self._global_channel_indexes.size

        sig_chans = signal_channels[self._global_channel_indexes]

        assert np.unique(sig_chans['units']).size == 1, 'Channels do not have the same units'
        assert np.unique(sig_chans['dtype']).size == 1, 'Channels do not have the same dtype'
        assert np.unique(sig_chans['sampling_rate']).size == 1, \
                    'Channels do not have the same sampling_rate'

        self.units = ensure_signal_units(sig_chans['units'][0])
        self.dtype = sig_chans['dtype'][0]
        self.sampling_rate = sig_chans['sampling_rate'][0] * pq.Hz
        self.sampling_period = 1. / self.sampling_rate
        sigs_size = self._rawio.get_signal_size(block_index=block_index, seg_index=seg_index,
                                        stream_index=stream_index)
        self.shape = (sigs_size, self._nb_chan)
        self.t_start = self._rawio.get_signal_t_start(block_index, seg_index, stream_index) * pq.s

        # magnitude_mode='raw' is supported only if all offset=0
        # and all gain are the same
        support_raw_magnitude = np.all(sig_chans['gain'] == sig_chans['gain'][0]) and \
                                                    np.all(sig_chans['offset'] == 0.)

        if support_raw_magnitude:
            str_units = ensure_signal_units(sig_chans['units'][0]).units.dimensionality.string
            gain0 = sig_chans['gain'][0]
            self._raw_units = pq.CompoundUnit(f'{gain0}*{str_units}')
        else:
            self._raw_units = None

        # retrieve annotations and array annotations
        seg_ann = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index]
        annotations = seg_ann['signals'][stream_index].copy()
        array_annotations = annotations.pop('__array_annotations__')
        array_annotations = {k: v[inner_stream_channels] for k, v in array_annotations.items()}

        BaseProxy.__init__(self, array_annotations=array_annotations, **annotations)
Example #11
    def test_notimeid(self):
        """
        Tests that a warning is issued when no time column id is provided.
        """

        filename = get_test_file_full_path(
            ioclass=NestIO,
            filename='0gid-1time-2gex-1262-0.dat',
            directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)

        t_start_targ = 450. * pq.ms
        t_stop_targ = 460. * pq.ms
        sampling_period = pq.CompoundUnit('5*ms')

        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            seg = r.read_segment(gid_list=[], t_start=t_start_targ,
                                 sampling_period=sampling_period,
                                 t_stop=t_stop_targ, lazy=False,
                                 id_column_dat=0, time_column_dat=None,
                                 value_columns_dat=2, value_types='V_m')
            # Verify number and content of warning
            self.assertEqual(len(w), 1)
            self.assertIn("no time column id", str(w[0].message))
        sts = seg.analogsignals
        for st in sts:
            self.assertTrue(st.t_start == 1 * 5 * pq.ms)
            self.assertTrue(
                st.t_stop == len(st) * sampling_period + 1 * 5 * pq.ms)
Example #12
    def test_read_segment(self):
        """Read data in a certain time range into one block"""

        nio = NeuralynxIO(self.sn, use_cache='never')
        seg = nio.read_segment(t_start=None, t_stop=None)

        self.assertEqual(len(seg.analogsignals), 1)
        self.assertEqual(seg.analogsignals[0].shape[-1], 5)

        self.assertEqual(seg.analogsignals[0].sampling_rate.units,
                         pq.CompoundUnit('32*kHz'))

        self.assertEqual(len(seg.spiketrains), 0)

        # Testing different parameter combinations
        seg = nio.read_segment(lazy=True)
        self.assertEqual(len(seg.analogsignals[0]), 0)
        self.assertEqual(len(seg.spiketrains), 0)

        seg = nio.read_segment(cascade=False)
        self.assertEqual(len(seg.analogsignals), 0)
        self.assertEqual(len(seg.spiketrains), 0)

        seg = nio.read_segment(electrode_list=[0])
        self.assertEqual(len(seg.analogsignals), 1)

        seg = nio.read_segment(t_start=None, t_stop=None, events=True,
                               waveforms=True)
        self.assertEqual(len(seg.analogsignals), 1)
        self.assertEqual(len(seg.spiketrains), 0)
        self.assertTrue(len(seg.events) > 0)
Example #13
    def test_read_block(self):
        """Read data in a certain time range into one block"""
        t_start, t_stop = 3 * pq.s, 4 * pq.s

        nio = NeuralynxIO(self.sn, use_cache='never')
        block = nio.read_block(t_starts=[t_start], t_stops=[t_stop])
        self.assertEqual(len(nio.parameters_ncs), 2)
        self.assertTrue({
            'event_id': 11,
            'name': 'Starting Recording',
            'nttl': 0
        } in nio.parameters_nev['Events.nev']['event_types'])

        # Everything put in one segment
        self.assertEqual(len(block.segments), 1)
        seg = block.segments[0]
        self.assertEqual(len(seg.analogsignals), 1)
        self.assertEqual(seg.analogsignals[0].shape[-1], 2)

        self.assertEqual(seg.analogsignals[0].sampling_rate.units,
                         pq.CompoundUnit('32*kHz'))
        self.assertEqual(seg.analogsignals[0].t_start, t_start)
        self.assertEqual(seg.analogsignals[0].t_stop, t_stop)
        self.assertEqual(len(seg.spiketrains), 2)

        # Testing different parameter combinations
        block = nio.read_block(lazy=True)
        self.assertEqual(len(block.segments[0].analogsignals[0]), 0)
        self.assertEqual(len(block.segments[0].spiketrains[0]), 0)

        block = nio.read_block(cascade=False)
        self.assertEqual(len(block.segments), 0)

        block = nio.read_block(electrode_list=[0])
        self.assertEqual(len(block.segments[0].analogsignals), 1)
        self.assertEqual(len(block.channel_indexes[-1].units), 1)

        block = nio.read_block(t_starts=None,
                               t_stops=None,
                               events=True,
                               waveforms=True)
        self.assertEqual(len(block.segments[0].analogsignals), 1)
        self.assertEqual(len(block.segments[0].spiketrains), 2)
        self.assertEqual(len(block.segments[0].spiketrains[0].waveforms),
                         len(block.segments[0].spiketrains[0]))
        self.assertGreater(len(block.segments[0].events), 0)
        self.assertEqual(len(block.channel_indexes[-1].units), 2)

        block = nio.read_block(t_starts=[t_start],
                               t_stops=[t_stop],
                               unit_list=[0],
                               electrode_list=[0])
        self.assertEqual(len(block.channel_indexes[-1].units), 1)

        block = nio.read_block(t_starts=[t_start],
                               t_stops=[t_stop],
                               unit_list=False)
        self.assertEqual(len(block.channel_indexes[-1].units), 0)
Example #14
 def test_single_gid(self):
     filename = self.get_local_path('nest/N1-0gid-1time-2Vm-1265-0.dat')
     r = NestIO(filenames=filename)
     anasig = r.read_analogsignal(gid=1, t_stop=1000. * pq.ms,
                                  time_unit=pq.CompoundUnit('0.1*ms'),
                                  sampling_period=pq.ms, lazy=False,
                                  id_column=0, time_column=1,
                                  value_column=2, value_type='V_m')
     assert anasig.annotations['id'] == 1
Example #15
 def __search_unit(self, unit_name: str):
     unit_dict = self.unit_dict
     if unit_dict and unit_name in unit_dict:
         unit_obj = unit_dict[unit_name]
     elif hasattr(pq, unit_name):
         unit_obj = getattr(pq, unit_name)
     else:
         unit_obj = pq.CompoundUnit(unit_name)
     return unit_obj
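The fallback order is: custom dictionary, then quantities attribute, then CompoundUnit parsing. The last two steps in isolation:

import quantities as pq

print(getattr(pq, 'ms'))          # plain names resolve as attributes of pq
print(pq.CompoundUnit('0.1*ms'))  # anything unknown falls back to CompoundUnit parsing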
Example #16
 def test_no_gid(self):
     filename = self.get_local_path('nest/N1-0time-1Vm-1266-0.dat')
     r = NestIO(filenames=filename)
     anasig = r.read_analogsignal(gid=None, t_stop=1000. * pq.ms,
                                  time_unit=pq.CompoundUnit('0.1*ms'),
                                  sampling_period=pq.ms, lazy=False,
                                  id_column=None, time_column=0,
                                  value_column=1, value_type='V_m')
     self.assertEqual(anasig.annotations['id'], None)
     self.assertEqual(len(anasig), 19)
Example #17
    def __init__(self, rawio=None, global_channel_indexes=None, block_index=0, seg_index=0):
        self._rawio = rawio
        self._block_index = block_index
        self._seg_index = seg_index
        if global_channel_indexes is None:
            global_channel_indexes = slice(None)
        total_nb_chan = self._rawio.header['signal_channels'].size
        self._global_channel_indexes = np.arange(total_nb_chan)[global_channel_indexes]
        self._nb_chan = self._global_channel_indexes.size

        sig_chans = self._rawio.header['signal_channels'][self._global_channel_indexes]

        assert np.unique(sig_chans['units']).size == 1, 'Channels do not have the same units'
        assert np.unique(sig_chans['dtype']).size == 1, 'Channels do not have the same dtype'
        assert np.unique(sig_chans['sampling_rate']).size == 1, \
                    'Channels do not have the same sampling_rate'

        self.units = ensure_signal_units(sig_chans['units'][0])
        self.dtype = sig_chans['dtype'][0]
        self.sampling_rate = sig_chans['sampling_rate'][0] * pq.Hz
        self.sampling_period = 1. / self.sampling_rate
        
        sigs_size = self._rawio.get_signal_size(block_index=block_index, seg_index=seg_index,
                                        channel_indexes=self._global_channel_indexes)
        self.shape = (sigs_size, self._nb_chan)
        self.t_start = self._rawio.get_signal_t_start(block_index, seg_index,
                                    self._global_channel_indexes) * pq.s

        # magnitude_mode='raw' is supported only if all offset=0
        # and all gain are the same
        support_raw_magnitude = np.all(sig_chans['gain'] == sig_chans['gain'][0]) and \
                                                    np.all(sig_chans['offset'] == 0.)

        if support_raw_magnitude:
            str_units = ensure_signal_units(sig_chans['units'][0]).units.dimensionality.string
            self._raw_units = pq.CompoundUnit('{}*{}'.format(sig_chans['gain'][0], str_units))
        else:
            self._raw_units = None

        # both necessary attr and annotations
        annotations = {}
        annotations['name'] = self._make_name(None)
        if len(sig_chans) == 1:
            # when there is only one channel, raw_annotations are used as standard annotations
            d = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index][
                'signals'][self._global_channel_indexes[0]]
            annotations.update(d)

        array_annotations = {
            'channel_names': np.array(sig_chans['name'], copy=True),
            'channel_ids': np.array(sig_chans['id'], copy=True),
        }

        BaseProxy.__init__(self, array_annotations=array_annotations, **annotations)
Example #18
 def test_single_gid(self):
     filename = get_test_file_full_path(
         ioclass=NestIO,
         filename='N1-0gid-1time-2Vm-1265-0.dat',
         directory=self.local_test_dir, clean=False)
     r = NestIO(filenames=filename)
     anasig = r.read_analogsignal(gid=1, t_stop=1000. * pq.ms,
                                  time_unit=pq.CompoundUnit('0.1*ms'),
                                  sampling_period=pq.ms, lazy=False,
                                  id_column=0, time_column=1,
                                  value_column=2, value_type='V_m')
     assert anasig.annotations['id'] == 1
Example #19
    def test_multiple_value_columns(self):
        """
        Test for simultaneous loading of multiple columns from dat file.
        """
        filename = self.get_local_path('nest/0gid-1time-2Vm-3Iex-4Iin-1264-0.dat')
        r = NestIO(filenames=filename)

        sampling_period = pq.CompoundUnit('5*ms')
        seg = r.read_segment(gid_list=[1001],
                             value_columns_dat=[2, 3],
                             sampling_period=sampling_period)
        anasigs = seg.analogsignals
        self.assertEqual(len(anasigs), 2)
Example #20
    def _center_and_scale(self, vmin, vmax, units='g', g=GRAVITY):
        """scale voltage (or raw) signals to m/s^2

        For details on accelerometer calibration see
        http://intantech.com/files/Intan_RHD2000_accelerometer_calibration.pdf

        Parameters
        ==========
        vmin : array-like
            minimum voltages for each dimension (corresponding to
            -g = -9.81 m/s^2)
        vmax : array-like
            maximum voltages for each dimension (corresponding to
            g = 9.81 m/s^2)
        units : str
            Either 'm/s^2' or 'g' for gravitational acceleration (9.81 m/s^2)
        g : float
            The value for g -> m/s^2 conversion; don't use quantities'
            predefined g_0 as it is a bit tricky to then convert it to m/s^2.
        """

        vmin = pq.Quantity(np.asarray(vmin), pq.volt)
        vmax = pq.Quantity(np.asarray(vmax), pq.volt)

        if not hasattr(g, 'units'):
            g = pq.Quantity(g, pq.CompoundUnit('m/s^2'))

        # remove static acceleration
        bias = vmin + .5 * (vmax - vmin)
        self.xyz -= bias

        # scale to g (= static acceleration range (-1, 1))
        self.xyz /= (vmax - vmin) / (2 * g.units)

        if units == pq.CompoundUnit('m/s^2') or units == 'm/s^2':

            self.xyz *= g.item()
            self.xyz *= pq.CompoundUnit('m/s^2')
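A toy walk-through of the centering and scaling math, assuming vmin/vmax map to -1 g/+1 g as the docstring states (all numbers made up):

import quantities as pq

vmin, vmax = pq.Quantity([1.], 'V'), pq.Quantity([3.], 'V')
v = pq.Quantity([2.5], 'V')                # a hypothetical reading
bias = vmin + .5 * (vmax - vmin)           # 2.0 V, the zero-acceleration level
print((v - bias) / ((vmax - vmin) / 2.))   # -> 0.5, i.e. half of g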
Example #21
def get_ITI(doc, trialid):
    """
    Returns consecutive inter-trial-interval for wanted trial

    Args:
        doc (odml.doc.BaseDocument):
            odML Document of reach-to-grasp project
        trialid (int):
            ID of wanted trial

    Returns:
        (quantities.quantity.Quantity):
            inter-trial-interval for wanted trial
    """
    ff = lambda x: x.name == 'Periods' and \
        x.parent.name == 'Trial_%03i' % trialid
    sec = [s for s in doc.itersections(filter_func=ff)][0]

    ff = lambda x: x.name == 'ITT'
    prop = [p for p in sec.iterproperties(filter_func=ff)][0]
    output = prop.value.data
    unit = prop.value.unit

    return pq.Quantity(output, pq.CompoundUnit(unit))
Example #22
    def test_signals_compound_units(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)

        units = pq.CompoundUnit("1/30000*V")
        srate = pq.Quantity(10, pq.CompoundUnit("1.0/10 * Hz"))
        asig = AnalogSignal(signal=self.rquant((10, 3), units),
                            sampling_rate=srate)
        seg.analogsignals.append(asig)

        self.write_and_compare([block])

        anotherblock = Block("ir signal block")
        seg = Segment("ir signal seg")
        anotherblock.segments.append(seg)
        irsig = IrregularlySampledSignal(signal=np.random.random((20, 3)),
                                         times=self.rquant(
                                             20, pq.CompoundUnit("0.1 * ms"),
                                             True),
                                         units=pq.CompoundUnit("10 * V / s"))
        seg.irregularlysampledsignals.append(irsig)
        self.write_and_compare([block, anotherblock])

        block.segments[0].analogsignals.append(
            AnalogSignal(signal=[10.0, 1.0, 3.0],
                         units=pq.S,
                         sampling_period=pq.Quantity(3, "s"),
                         dtype=np.double,
                         name="signal42",
                         description="this is an analogsignal",
                         t_start=45 * pq.CompoundUnit("3.14 * s")), )
        self.write_and_compare([block, anotherblock])

        times = self.rquant(10, pq.CompoundUnit("3 * year"), True)
        block.segments[0].irregularlysampledsignals.append(
            IrregularlySampledSignal(times=times,
                                     signal=np.random.random((10, 3)),
                                     units="mV",
                                     dtype=np.float64,
                                     name="some sort of signal",
                                     description="the signal is described"))

        self.write_and_compare([block, anotherblock])
Example #23
def instantaneous_rate(spiketrain,
                       sampling_period,
                       kernel='auto',
                       cutoff=5.0,
                       t_start=None,
                       t_stop=None,
                       trim=False):
    """
    Estimates instantaneous firing rate by kernel convolution.

    Parameters
    -----------
    spiketrain : 'neo.SpikeTrain'
        Neo object that contains spike times, the unit of the time stamps
        and t_start and t_stop of the spike train.
    sampling_period : Time Quantity
        Time stamp resolution of the spike times. The same resolution will
        be assumed for the kernel
    kernel : string 'auto' or callable object of :class:`Kernel` from module
        'kernels.py'. Currently implemented kernel forms are rectangular,
        triangular, epanechnikovlike, gaussian, laplacian, exponential,
        and alpha function.
        Example: kernel = kernels.RectangularKernel(sigma=10*ms, invert=False)
        The kernel is used for convolution with the spike train and its
        standard deviation determines the time resolution of the instantaneous
        rate estimation.
        Default: 'auto'. In this case, the optimized kernel width for the
        rate estimation is calculated according to [1] and a Gaussian kernel
        of this width is constructed. Automatic calculation of the kernel
        width is only available for Gaussian kernel shapes.
    cutoff : float
        This factor determines the cutoff of the probability distribution of
        the kernel, i.e., the considered width of the kernel in terms of 
        multiples of the standard deviation sigma.
        Default: 5.0
    t_start : Time Quantity (optional)
        Start time of the interval used to compute the firing rate. If None
        assumed equal to spiketrain.t_start
        Default: None
    t_stop : Time Quantity (optional)
        End time of the interval used to compute the firing rate (included).
        If None assumed equal to spiketrain.t_stop
        Default: None
    trim : bool
        If False, the output of the Fast Fourier Transformation, which is
        longer than the input vector by the size of the kernel, is reduced
        back to the original size of the considered time interval of the
        spiketrain using the median of the kernel.
        If True, only the region of the convolved signal where kernel and
        spike train overlap completely is returned. This is achieved by
        reducing the length of the output of the Fast Fourier Transformation
        by a total of two times the size of the kernel, and t_start and
        t_stop are adjusted.
        Default: False

    Returns
    -------
    rate : neo.AnalogSignal
        Contains the rate estimation in unit hertz (Hz).
        Has a property 'rate.times' which contains the time axis of the rate
        estimate. The unit of this property is the same as the resolution that
        is given via the argument 'sampling_period' to the function.

    Raises
    ------
    TypeError:
        If `spiketrain` is not an instance of :class:`SpikeTrain` of Neo.
        If `sampling_period` is not a time quantity.
        If `kernel` is neither an instance of :class:`Kernel` nor the string 'auto'.
        If `cutoff` is neither float nor int.
        If `t_start` and `t_stop` are neither None nor a time quantity.
        If `trim` is not bool.

    ValueError:
        If `sampling_period` is smaller than zero.

    Example
    --------
    kernel = kernels.AlphaKernel(sigma=0.05*s, invert=True)
    rate = instantaneous_rate(spiketrain, sampling_period=2*ms, kernel=kernel)

    References
    ----------
    .. [1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.

    """
    # Checks of input variables:
    if not isinstance(spiketrain, SpikeTrain):
        raise TypeError(
            "spiketrain must be instance of :class:`SpikeTrain` of Neo!\n"
            "    Found: %s, value %s" % (type(spiketrain), str(spiketrain)))

    if not (isinstance(sampling_period, pq.Quantity)
            and sampling_period.dimensionality.simplified == pq.Quantity(
                1, "s").dimensionality):
        raise TypeError("The sampling period must be a time quantity!\n"
                        "    Found: %s, value %s" %
                        (type(sampling_period), str(sampling_period)))

    if sampling_period.magnitude < 0:
        raise ValueError("The sampling period must be larger than zero.")

    if kernel == 'auto':
        kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                bootstrap=True)['optw']
        unit = spiketrain.units
        sigma = 1 / (2.0 * 2.7) * kernel_width * unit
        # factor 2.0 connects kernel width with its half width,
        # factor 2.7 connects half width of Gaussian distribution with
        #             99% probability mass with its standard deviation.
        kernel = kernels.GaussianKernel(sigma)
    elif not isinstance(kernel, kernels.Kernel):
        raise TypeError("kernel must be either instance of :class:`Kernel` "
                        "or the string 'auto'!\n"
                        "    Found: %s, value %s" %
                        (type(kernel), str(kernel)))

    if not (isinstance(cutoff, float) or isinstance(cutoff, int)):
        raise TypeError("cutoff must be float or integer!")

    if not (t_start is None or (isinstance(t_start, pq.Quantity)
                                and t_start.dimensionality.simplified
                                == pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_start must be a time quantity!")

    if not (t_stop is None or (isinstance(t_stop, pq.Quantity)
                               and t_stop.dimensionality.simplified
                               == pq.Quantity(1, "s").dimensionality)):
        raise TypeError("t_stop must be a time quantity!")

    if not (isinstance(trim, bool)):
        raise TypeError("trim must be bool!")

    # main function:
    units = pq.CompoundUnit("%s*s" %
                            str(sampling_period.rescale('s').magnitude))
    spiketrain = spiketrain.rescale(units)
    if t_start is None:
        t_start = spiketrain.t_start
    else:
        t_start = t_start.rescale(spiketrain.units)

    if t_stop is None:
        t_stop = spiketrain.t_stop
    else:
        t_stop = t_stop.rescale(spiketrain.units)

    time_vector = np.zeros(int((t_stop - t_start)) + 1)

    spikes_slice = spiketrain.time_slice(t_start, t_stop) \
        if len(spiketrain) else np.array([])

    for spike in spikes_slice:
        index = int((spike - t_start))
        time_vector[index] += 1

    if cutoff < kernel.min_cutoff:
        cutoff = kernel.min_cutoff
        warnings.warn("The width of the kernel was adjusted to a minimally "
                      "allowed width.")

    t_arr = np.arange(
        -cutoff * kernel.sigma.rescale(units).magnitude,
        cutoff * kernel.sigma.rescale(units).magnitude +
        sampling_period.rescale(units).magnitude,
        sampling_period.rescale(units).magnitude) * units

    r = scipy.signal.fftconvolve(time_vector,
                                 kernel(t_arr).rescale(pq.Hz).magnitude,
                                 'full')
    if np.any(r < 0):
        warnings.warn("Instantaneous firing rate approximation contains "
                      "negative values, possibly caused due to machine "
                      "precision errors.")

    if not trim:
        r = r[kernel.median_index(t_arr):-(kernel(t_arr).size -
                                           kernel.median_index(t_arr))]
    elif trim:
        r = r[2 * kernel.median_index(t_arr):-2 *
              (kernel(t_arr).size - kernel.median_index(t_arr))]
        t_start += kernel.median_index(t_arr) * spiketrain.units
        t_stop -= (kernel(t_arr).size -
                   kernel.median_index(t_arr)) * spiketrain.units

    rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
                            sampling_period=sampling_period,
                            units=pq.Hz,
                            t_start=t_start,
                            t_stop=t_stop)

    return rate
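A hedged usage sketch with a toy spike train and an explicit kernel (assuming elephant's kernels module; with the default kernel='auto' the width would instead be optimized via sskernel):

import quantities as pq
from neo import SpikeTrain
from elephant import kernels  # assumed import path for the Kernel classes

st = SpikeTrain([0.1, 0.5, 0.9] * pq.s, t_start=0 * pq.s, t_stop=1 * pq.s)
kernel = kernels.GaussianKernel(sigma=50 * pq.ms)
rate = instantaneous_rate(st, sampling_period=10 * pq.ms, kernel=kernel)
# rate is a neo.AnalogSignal in Hz, sampled every 10 ms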
Example #24
def oldfct_instantaneous_rate(spiketrain,
                              sampling_period,
                              form,
                              sigma='auto',
                              t_start=None,
                              t_stop=None,
                              acausal=True,
                              trim=False):  # pragma: no cover
    """
    Estimate instantaneous firing rate by kernel convolution.

    Parameters
    -----------
    spiketrain: 'neo.SpikeTrain'
        Neo object that contains spike times, the unit of the time stamps
        and t_start and t_stop of the spike train.
    sampling_period : Quantity
        time stamp resolution of the spike times. The same resolution will
        be assumed for the kernel
    form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
        Kernel form. Currently implemented forms are BOX (boxcar),
        TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
        ALP (alpha function). EXP and ALP are asymmetric kernel forms and
        assume optional parameter `direction`.
    sigma : string or Quantity
        Standard deviation of the distribution associated with kernel shape.
        This parameter defines the time resolution of the kernel estimate
        and makes different kernels comparable (cf. [1] for symmetric kernels).
        This is used here as an alternative definition to the cut-off
        frequency of the associated linear filter.
        Default value is 'auto'. In this case, the optimized kernel width for
        the rate estimation is calculated according to [1]. Note that the
        automatized calculation of the kernel width ONLY works for gaussian
        kernel shapes!
    t_start : Quantity (optional)
        start time of the interval used to compute the firing rate, if None
        assumed equal to spiketrain.t_start
        Default: None
    t_stop : Quantity (optional)
        End time of the interval used to compute the firing rate (included).
        If None assumed equal to spiketrain.t_stop
        Default: None
    acausal : bool
        if True, acausal filtering is used, i.e., the gravity center of the
        filter function is aligned with the spike to convolve
        Default: True
    trim : bool
        if True, only the 'valid' region of the convolved
        signal is returned, i.e., the points where there
        isn't complete overlap between kernel and spike train
        are discarded
        NOTE: if True and an asymmetrical kernel is provided
        the output will not be aligned with [t_start, t_stop]

    Returns
    -------
    rate : neo.AnalogSignal
        Contains the rate estimation in unit hertz (Hz).
        Has a property 'rate.times' which contains the time axis of the rate
        estimate. The unit of this property is the same as the resolution that
        is given as an argument to the function.

    Raises
    ------
    TypeError:
        If argument value for the parameter `sigma` is not a quantity object
        or string 'auto'.

    See also
    --------
    elephant.statistics.make_kernel

    References
    ----------
    .. [1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
    """
    warnings.simplefilter('always', DeprecationWarning)
    warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
    if sigma == 'auto':
        form = 'GAU'
        unit = spiketrain.units
        kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                bootstrap=True)['optw']
        sigma = kw2sigma(form) * kernel_width * unit
    elif not isinstance(sigma, pq.Quantity):
        raise TypeError('sigma must be either a quantities object or "auto".'
                        ' Found: %s, value %s' % (type(sigma), str(sigma)))

    kernel, norm, m_idx = make_kernel(form=form,
                                      sigma=sigma,
                                      sampling_period=sampling_period)
    units = pq.CompoundUnit("%s*s" %
                            str(sampling_period.rescale('s').magnitude))
    spiketrain = spiketrain.rescale(units)
    if t_start is None:
        t_start = spiketrain.t_start
    else:
        t_start = t_start.rescale(spiketrain.units)

    if t_stop is None:
        t_stop = spiketrain.t_stop
    else:
        t_stop = t_stop.rescale(spiketrain.units)

    time_vector = np.zeros(int((t_stop - t_start)) + 1)

    spikes_slice = spiketrain.time_slice(t_start, t_stop) \
        if len(spiketrain) else np.array([])

    for spike in spikes_slice:
        index = int((spike - t_start))
        time_vector[index] += 1

    r = norm * scipy.signal.fftconvolve(time_vector, kernel, 'full')
    if np.any(r < 0):
        warnings.warn('Instantaneous firing rate approximation contains '
                      'negative values, possibly caused due to machine '
                      'precision errors')

    # The acausal and causal branches of the original were identical: the
    # convolution result is trimmed the same way in either case.
    if not trim:
        r = r[m_idx:-(kernel.size - m_idx)]
    elif trim:
        r = r[2 * m_idx:-2 * (kernel.size - m_idx)]
        t_start = t_start + m_idx * spiketrain.units
        t_stop = t_stop - (kernel.size - m_idx) * spiketrain.units

    rate = neo.AnalogSignal(signal=r.reshape(r.size, 1),
                            sampling_period=sampling_period,
                            units=pq.Hz,
                            t_start=t_start)

    return rate, sigma
Example #25
def create_quantity(values, unitstr):
    if "*" in unitstr:
        unit = pq.CompoundUnit(stringify(unitstr))
    else:
        unit = unitstr
    return pq.Quantity(values, unit)
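Usage is straightforward; the '*' test is what routes a string to CompoundUnit (stringify is assumed to normalize bytes to str):

create_quantity([1, 2, 3], 'ms')      # plain unit name, passed through as-is
create_quantity([1, 2, 3], '0.1*ms')  # contains '*', parsed as a CompoundUnit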
Example #26
    f = open(i).readlines()
    for linenumber, line in enumerate(f):
        if 'Local parameters and GOALs for phase ' in line and 'amorphous' not in line:
            filenamelist.append(filename)
            phasename = line.split('GOALs for phase ')[1].replace('\n', '')
            phaselist.append(phasename)
        split0 = line.split('=')
        if split0[0] == 'UNIT':
            if 'NM' in split0[1]:
                unitoflength = pq.nm
            lata = latb = latc = uq(0, unitoflength, 0)
            TDS100 = TDS010 = TDS001 = TDS = uq(0, unitoflength**2, 0)
            GrainSize100 = GrainSize010 = GrainSize001 = uq(0, unitoflength, 0)
            MicroStrain100 = MicroStrain010 = MicroStrain001 = uq(
                0, pq.CompoundUnit('m/m'), 0)
            Textur100 = Textur010 = Textur001 = Gewicht = uq(
                1, pq.dimensionless, 0)
        ####
        if split0[0] == 'XrayDensity':
            XrayDensity0 = float(split0[1])
        if split0[0] == 'A':
            if '+-' in split0[1]:
                lata = uq(float(split0[1].split('+-')[0]), unitoflength,
                          float(split0[1].split('+-')[1]))
            elif 'UNDEF' not in split0[1] and 'ERROR' not in split0[1]:
                lata = uq(float(split0[1]), unitoflength, 0)
            latb = latc = lata
        if split0[0] == 'B':
            if '+-' in split0[1]:
                latb = uq(float(split0[1].split('+-')[0]), unitoflength,
                          float(split0[1].split('+-')[1]))
Example #27
"""
    sonde.quantities
    ~~~~~~~~~~~~~~~~

    This module contains a few custom quantities that are used
    primarily for unit conversion.
"""
from __future__ import absolute_import
import quantities as pq

#: Unit of concentration - milligrams per liter
mgl = pq.UnitQuantity('Concentration', pq.CompoundUnit("mg/L"), symbol='mg/L')

#: Unit of specific conductivity - milliSiemens per centimeter
mScm = pq.UnitQuantity('Specific Conductivity in MilliSiemens per Centimeter',
                       pq.CompoundUnit("1e-3*S/cm"),
                       symbol='mS/cm')

#: Unit of turbidity - nephelometric turbidity units
ntu = pq.UnitQuantity('Turbidity', pq.dimensionless, symbol='NTU')

#: Unit of salinity - practical salinity units
psu = pq.UnitQuantity('Salinity', pq.dimensionless, symbol='PSU')
ppt = psu

#: Nickname for dimensionless
dl = pq.dimensionless

#: Unit of speed - meters per second
mps = pq.UnitQuantity('Speed', pq.m / pq.second, symbol='m/s')
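For instance, converting a specific-conductivity reading with the compound-backed unit defined above:

val = 2.5 * mScm
print(val.rescale('S/m'))  # 2.5 mS/cm == 0.25 S/m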
Example #28
 def test_compound_quantities(self):
     m1 = skdb.Unit("m**2/m**3")
     m2 = quantities.CompoundUnit("m**2/m**3")
     self.assertTrue(m1.compatible(m2))
Example #29
    def test_compare_blackrockio_with_matlabloader_v21(self):
        """
        This test compares the output of BlackrockIO.read_block() with the
        output generated by a Matlab implementation of a Blackrock file reader
        provided by the company. The output for comparison is provided in a
        .mat file created by the script create_data_matlab_blackrock.m.
        The function tests LFPs, spike times, and digital events.
        """

        dirname = get_test_file_full_path(ioclass=BlackrockIO,
                                          filename='blackrock_2_1/l101210-001',
                                          directory=self.local_test_dir,
                                          clean=False)
        # First run with parameters for ns5, then run with correct parameters for ns2
        parameters = [('blackrock_2_1/l101210-001_nev-02_ns5.mat', {
            'nsx_to_load': 5,
            'nev_override': '-'.join([dirname, '02'])
        }), ('blackrock_2_1/l101210-001.mat', {
            'nsx_to_load': 2
        })]
        for index, param in enumerate(parameters):
            # Load data from matlab generated files
            ml = scipy.io.loadmat(
                get_test_file_full_path(ioclass=BlackrockIO,
                                        filename=param[0],
                                        directory=self.local_test_dir,
                                        clean=False))
            lfp_ml = ml['lfp']  # (channel x time) LFP matrix
            ts_ml = ml['ts']  # spike time stamps
            elec_ml = ml['el']  # spike electrodes
            unit_ml = ml['un']  # spike unit IDs
            wf_ml = ml['wf']  # waveforms
            mts_ml = ml['mts']  # marker time stamps
            mid_ml = ml['mid']  # marker IDs

            # Load data from original data files using the Neo BlackrockIO
            session = BlackrockIO(dirname, verbose=False, **param[1])
            block = session.read_block(load_waveforms=True,
                                       signal_group_mode='split-all')
            # Check if analog data are equal
            self.assertGreater(len(block.channel_indexes), 0)
            for i, chidx in enumerate(block.channel_indexes):
                # Break for ChannelIndexes for Units that don't contain any Analogsignals
                if len(chidx.analogsignals) == 0 and len(chidx.units) >= 1:
                    break
                # Should only have one AnalogSignal per ChannelIndex
                self.assertEqual(len(chidx.analogsignals), 1)

                # Find out channel_id in order to compare correctly
                idx = chidx.analogsignals[0].annotations['channel_id']
                # Get data of AnalogSignal without pq.units
                anasig = np.squeeze(chidx.analogsignals[0].base[:].magnitude)
                # Test for equality of first nonzero values of AnalogSignal
                #                                   and matlab file contents
                # If not equal test if hardcoded gain is responsible for this
                # See BlackrockRawIO ll. 1420 commit 77a645655605ae39eca2de3ee511f3b522f11bd7
                j = 0
                while anasig[j] == 0:
                    j += 1
                if lfp_ml[i, j] != np.squeeze(
                        chidx.analogsignals[0].base[j].magnitude):
                    anasig = anasig / 152.592547
                    anasig = np.round(anasig).astype(int)

                # Special case because id 142 is not included in ns2 file
                if idx == 143:
                    idx -= 1
                if idx > 128:
                    idx = idx - 136

                assert_equal(anasig, lfp_ml[idx - 1, :])

            # Check if spikes are equal
            self.assertEqual(len(block.segments), 1)
            for st_i in block.segments[0].spiketrains:
                channelid = st_i.annotations['channel_id']
                unitid = st_i.annotations['unit_id']

                # Compare waveforms
                matlab_wf = wf_ml[np.nonzero(
                    np.logical_and(elec_ml == channelid, unit_ml ==
                                   unitid)), :][0]
                # Atleast_2d as correction for waveforms that are saved
                # in single dimension in SpikeTrain
                # because only one waveform is available
                assert_equal(
                    np.atleast_2d(np.squeeze(st_i.waveforms).magnitude),
                    matlab_wf)

                # Compare spike timestamps
                matlab_spikes = ts_ml[np.nonzero(
                    np.logical_and(elec_ml == channelid, unit_ml == unitid))]
                # Make sure that the unit really is seconds and not 1/30000 seconds
                if (not st_i.units == pq.CompoundUnit("1.0/{0} * s".format(30000))) and \
                        st_i.units == pq.s:
                    st_i = np.round(st_i.base * 30000).astype(int)
                assert_equal(st_i, matlab_spikes)

            # Check if digital input port events are equal
            self.assertGreater(len(block.segments[0].events), 0)
            for ea_i in block.segments[0].events:
                if ea_i.name == 'digital_input_port':
                    # Get all digital event IDs in this recording
                    marker_ids = set(ea_i.labels)
                    for marker_id in marker_ids:
                        python_digievents = np.round(
                            ea_i.times.base[ea_i.labels == marker_id] *
                            30000).astype(int)
                        matlab_digievents = mts_ml[np.nonzero(
                            mid_ml == int(marker_id))]
                        assert_equal(python_digievents, matlab_digievents)
Example #30
# needed for Python3 compatibility
from __future__ import absolute_import

import os.path
import warnings
from datetime import datetime
import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import Block, Segment, SpikeTrain, AnalogSignal

value_type_dict = {
    'V': pq.mV,
    'I': pq.pA,
    'g': pq.CompoundUnit("10^-9*S"),
    'no type': pq.dimensionless
}
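The dict maps a recorded value type to its unit; note the conductance entry is a compound unit for nanosiemens. A quick illustration with made-up values:

g_trace = pq.Quantity([1., 2.], value_type_dict['g'])  # conductances in 10^-9 S
print(g_trace.rescale(pq.siemens))                     # -> [1.e-09 2.e-09] S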


class NestIO(BaseIO):
    """
    Class for reading NEST output files. GDF files for the spike data and DAT
    files for analog signals are possible.

    Usage:
        >>> from neo.io.nestio import NestIO

        >>> files = ['membrane_voltages-1261-0.dat',
        ...          'spikes-1258-0.gdf']
        >>> r = NestIO(filenames=files)