def run(self):
    import json

    from spikeforest import SFMdaSortingExtractor

    sorting = SFMdaSortingExtractor(firings_file=self.firings_path)
    samplerate = self.samplerate
    max_samples = self.max_samples
    max_dt_msec = self.max_dt_msec
    bin_size_msec = self.bin_size_msec

    # Convert the correlogram window and bin size from milliseconds to
    # timepoints (samples)
    max_dt_tp = max_dt_msec * samplerate / 1000
    bin_size_tp = bin_size_msec * samplerate / 1000

    autocorrelograms = []
    for unit_id in sorting.get_unit_ids():
        print('Computing autocorrelogram for unit {}'.format(unit_id))
        (bin_counts, bin_edges) = compute_autocorrelogram(
            sorting.get_unit_spike_train(unit_id),
            max_dt_tp=max_dt_tp,
            bin_size_tp=bin_size_tp,
            max_samples=max_samples
        )
        autocorrelograms.append(dict(
            unit_id=unit_id,
            bin_counts=bin_counts,
            bin_edges=bin_edges
        ))

    ret = dict(
        autocorrelograms=autocorrelograms
    )
    with open(self.json_out, 'w') as f:
        json.dump(serialize_np(ret), f)
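
# compute_autocorrelogram and serialize_np are called above but not defined in
# this section. The following are hedged sketches of what they might look like,
# not the repository's actual implementations: compute_autocorrelogram is
# assumed to take a spike train in sample units, optionally subsample it to at
# most max_samples events, histogram all pairwise spike-time differences within
# +/- max_dt_tp, and return (bin_counts, bin_edges); serialize_np is assumed to
# recursively convert numpy values to plain Python types for json.dump.
import numpy as np


def compute_autocorrelogram(times, *, max_dt_tp, bin_size_tp, max_samples=None):
    times = np.sort(np.asarray(times, dtype=float))
    if max_samples is not None and len(times) > max_samples:
        # Subsample the spike train to bound the cost of the pairwise search
        keep = np.sort(np.random.choice(len(times), size=max_samples, replace=False))
        times = times[keep]
    deltas = []
    n = len(times)
    for i in range(n):
        j = i + 1
        # Collect forward differences within the window; mirror them so the
        # correlogram is symmetric about zero lag
        while j < n and times[j] - times[i] <= max_dt_tp:
            deltas.append(times[j] - times[i])
            deltas.append(times[i] - times[j])
            j += 1
    bin_edges = np.arange(-max_dt_tp, max_dt_tp + bin_size_tp, bin_size_tp)
    bin_counts, bin_edges = np.histogram(deltas, bins=bin_edges)
    return bin_counts, bin_edges


def serialize_np(obj):
    # Recursively replace numpy arrays/scalars with JSON-serializable types
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.generic):
        return obj.item()
    if isinstance(obj, dict):
        return {k: serialize_np(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [serialize_np(v) for v in obj]
    return obj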
def load_sorting_results_info(firings_path, *, recording_path, epoch_name, ntrode_name, curated=False):
    # Return a summary dict for a sorting result, or None if the firings file
    # cannot be located
    if not mt.findFile(firings_path):
        return None
    sorting = SFMdaSortingExtractor(firings_file=firings_path)
    total_num_events = 0
    for unit_id in sorting.get_unit_ids():
        spike_times = sorting.get_unit_spike_train(unit_id=unit_id)
        total_num_events += len(spike_times)
    return dict(
        type='sorting_results',
        epoch_name=epoch_name,
        ntrode_name=ntrode_name,
        curated=curated,
        firings_path=firings_path,
        recording_path=recording_path,
        unit_ids=sorting.get_unit_ids(),
        num_events=total_num_events
    )
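
# Hedged usage sketch for load_sorting_results_info. It assumes mt is the
# mountaintools client (e.g. `from mountaintools import client as mt`) imported
# at module level alongside SFMdaSortingExtractor, and the sha1 paths and
# epoch/ntrode names below are hypothetical placeholders, not real data.
#
# info = load_sorting_results_info(
#     'sha1://<hash>/firings.mda',
#     recording_path='sha1dir://<hash>/recording',
#     epoch_name='epoch01',
#     ntrode_name='nt1',
#     curated=True
# )
# if info is None:
#     print('Firings file not found')
# else:
#     print('{} units, {} events'.format(len(info['unit_ids']), info['num_events']))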