# Example 1
# Compute a firing-rate value for unit 1 and attach it to the sorting as a
# unit property (unit properties are name/value pairs stored per unit).
spike_train_unit1 = sorting2.get_unit_spike_train(unit_id=1)
# NOTE(review): dividing the spike count by the sampling frequency equals
# spikes/second only if the recording is exactly fs frames (one second) long
# — confirm against the recording duration.
rate_unit1 = len(spike_train_unit1) / float(sorting2.get_sampling_frequency())
sorting2.set_unit_property(unit_id=1, property_name='firing_rate',
                           value=rate_unit1)
stored_rate = sorting2.get_unit_property(unit_id=1,
                                         property_name='firing_rate')
print('Average firing rate during the recording of unit 1 = {}'.format(
    stored_rate))
print("Spike property names: " +
      str(sorting2.get_shared_unit_property_names()))

##############################################################################
# :code:`SubSortingExtractor` objects can be used to extract arbitrary subsets of your units/spike trains manually

# A SubSortingExtractor views a subset of the parent sorting's units,
# restricted to a frame range, without copying any data.
sorting3 = se.SubSortingExtractor(parent_sorting=sorting2,
                                  unit_ids=[1, 2],
                                  start_frame=10000,
                                  end_frame=20000)
print('Num. units = {}'.format(len(sorting3.get_unit_ids())))
# NOTE(review): the 6000 divisor is presumably the epoch duration derived from
# the sampling frequency — confirm it matches
# (end_frame - start_frame) / sampling_frequency.
print('Average firing rate of unit 1 during frames 10000-20000 = {}'.format(
    float(len(sorting3.get_unit_spike_train(unit_id=1))) / 6000))

##############################################################################
# Unit features are name-value pairs that we can store for each spike. Let's load a randomly generated 'random_value'
# feature. Features are used, for example, to store waveforms, amplitudes, and PCA scores

# Attach one random scalar per spike of unit 1 as the 'random_value' feature
# (spike features are per-spike name/value pairs).
n_spikes_unit1 = len(sorting3.get_unit_spike_train(unit_id=1))
random_values = np.random.randn(n_spikes_unit1)
sorting3.set_unit_spike_features(unit_id=1, feature_name='random_value',
                                 value=random_values)
feature_names = sorting3.get_shared_unit_spike_feature_names()
print("Spike feature names: " + str(feature_names))
# Example 2
    recordings_list.append(recording_single)


##############################################################################
# We can now use the :code:`recordings_list` to instantiate a :code:`MultiRecordingTimeExtractor`, which concatenates
# the traces in time:

# Concatenate the individual recordings end-to-end along the time axis.
multirecording = se.MultiRecordingTimeExtractor(recordings=recordings_list)

##############################################################################
# Since the :code:`MultiRecordingTimeExtractor` is a :code:`RecordingExtractor`, we can run spike sorting "normally"

# Sort the concatenated recording as if it were a single continuous one.
multisorting = ss.run_klusta(multirecording)

##############################################################################
# The returned :code:`multisorting` object is a normal :code:`SortingExtractor`, but we know that its spike trains are
# concatenated similarly to the recording concatenation. So we have to split them back. We can do that using the `epoch`
# information in the :code:`MultiRecordingTimeExtractor`:

# Split the concatenated sorting back into one sorting per original recording,
# using the epoch (frame-range) information stored on the
# MultiRecordingTimeExtractor. Each epoch's frame range selects the spikes
# that belong to that recording.
# (Fixed: `sortings` was initialized twice in a row.)
sortings = []
for epoch in multirecording.get_epoch_names():
    info = multirecording.get_epoch_info(epoch)
    sorting_single = se.SubSortingExtractor(multisorting,
                                            start_frame=info['start_frame'],
                                            end_frame=info['end_frame'])
    sortings.append(sorting_single)

##############################################################################
# The :code:`SortingExtractor` objects in the :code:`sortings` list now contain the split spike trains. The nice thing
# about this approach is that the unit_ids for the different epochs refer to the same units!