import os
import shutil
import tempfile

import numpy as np
import pandas as pd
import h5py

# NOTE: import locations below assume bmtk's spike_trains report module
# layout; adjust to the local package layout if it differs.
from bmtk.utils.reports.spike_trains import SpikeTrains, pop_na, sort_order
from bmtk.utils.reports.spike_trains import spike_train_buffer


def load_spike_trains(file_path):
    cpath = os.path.dirname(os.path.realpath(__file__))
    file_path = os.path.join(cpath, file_path)
    if file_path.endswith('.csv'):
        return SpikeTrains.from_csv(file_path)
    elif file_path.endswith('.h5'):
        return SpikeTrains.from_sonata(file_path)
    elif file_path.endswith('.nwb'):
        return SpikeTrains.from_nwb(file_path)
    else:
        raise ValueError('Unrecognized spike-trains file format: {}'.format(file_path))
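
# The tests below call a full_path() helper that isn't defined in this
# section. A minimal sketch, assuming it resolves fixture paths relative to
# this file the same way load_spike_trains() does; the `path` arguments
# themselves are presumably supplied via pytest parametrization.
def full_path(file_path):
    cpath = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(cpath, file_path)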
def test_multipop_with_default(path):
    path = full_path(path)
    st = SpikeTrains.from_sonata(path, population='tw')
    assert 'tw' in st.populations and 'lgn' not in st.populations

    n1_tw_ts = st.get_times(node_id=0, population='tw')
    assert len(n1_tw_ts) > 0
    assert np.all(n1_tw_ts == st.get_times(node_id=0))
def test_single_populations(path):
    path = full_path(path)
    st = SpikeTrains.from_sonata(path)
    assert st.populations == ['v1']

    node0_timestamps = st.get_times(node_id=0, population='v1')
    assert np.all(st.get_times(node_id=0) == node0_timestamps)
    assert st.get_times(node_id=0, population='should_not_work') == []
def test_old_populations(path):
    path = full_path(path)
    st = SpikeTrains.from_sonata(path)
    assert st.populations == [pop_na]

    node0_timestamps = st.get_times(node_id=0, population=pop_na)
    assert len(node0_timestamps) > 0
    assert np.all(st.get_times(node_id=0) == node0_timestamps)
    assert np.all(st.get_times(node_id=0, population='should_still_work') == node0_timestamps)
def test_multi_populations(path):
    path = full_path(path)
    st = SpikeTrains.from_sonata(path)
    assert 'tw' in st.populations and 'lgn' in st.populations

    n1_tw_ts = st.get_times(node_id=0, population='tw')
    n1_lgn_ts = st.get_times(node_id=0, population='lgn')
    assert len(n1_tw_ts) > 0
    assert len(n1_lgn_ts) > 0
    assert not np.array_equal(n1_tw_ts, n1_lgn_ts)
    assert st.get_times(node_id=0, population='other') == []
def test_empty_spikes():
    st = SpikeTrains(adaptor=spike_train_buffer.STMemoryBuffer())
    output_path = full_path('output/tmpspikes.h5')
    st.to_sonata(path=output_path)
    st.close()

    st_empty = SpikeTrains.from_sonata(output_path)
    assert st_empty.populations == []
    assert st_empty.n_spikes() == 0
    assert list(st_empty.spikes()) == []

    os.remove(output_path)
def test_single_proc(adaptor_cls):
    buffer_dir = tempfile.mkdtemp()
    output_csv = os.path.join(buffer_dir, 'testfile.csv')
    output_h5 = os.path.join(buffer_dir, 'testfile.h5')

    adaptor = adaptor_cls()
    spike_trains = SpikeTrains(read_adaptor=adaptor, write_adaptor=adaptor)

    # 1,000 single spikes in the default population, timestamps descending.
    timestamps = np.linspace(1000.0, 0.0, 1000)
    node_ids = np.arange(0, 1000)
    for node_id, timestamp in zip(node_ids, timestamps):
        spike_trains.add_spike(node_id, timestamp)

    # 100 spikes each for another 1,000 nodes in the default population.
    for node_id in range(1000, 2000):
        spike_trains.add_spikes(node_id, np.linspace(0.0, 2000.0, 100))

    # 50 random spikes each for 20 nodes in the 'test' population.
    for node_id in range(0, 100, 5):
        spike_trains.add_spikes(np.repeat(node_id, 50), np.random.uniform(0.1, 3000.0, 50), population='test')

    spike_trains.to_csv(output_csv, sort_order=sort_order.by_time)
    df = pd.read_csv(output_csv, sep=' ')
    assert len(df) == 102000
    assert len(df['population'].unique()) == 2

    test_pop = df[df['population'] == 'test']
    assert len(test_pop) == 20*50
    assert all(np.diff(test_pop['timestamps']) >= 0.0)

    default_pop = df[df['population'] == pop_na]
    assert len(default_pop) == 1000 + 1000*100
    assert all(np.diff(default_pop['timestamps']) >= 0.0)

    spike_trains.to_sonata(output_h5, sort_order=sort_order.by_id)
    h5root = h5py.File(output_h5, 'r')
    test_pop = h5root['spikes/test']
    assert test_pop.attrs['sorting'] == 'by_id'
    assert test_pop['timestamps'].shape == (1000,)
    assert test_pop['node_ids'].shape == (1000,)
    assert len(np.unique(test_pop['node_ids'][()])) == 20
    assert all(np.diff(test_pop['node_ids'][()]) >= 0)

    default_pop = h5root['spikes'][pop_na]
    assert default_pop.attrs['sorting'] == 'by_id'
    assert default_pop['timestamps'].shape == (1000 + 1000*100,)
    assert default_pop['node_ids'].shape == (1000 + 1000*100,)
    assert all(np.diff(default_pop['node_ids'][()]) >= 0)
    assert len(np.unique(default_pop['node_ids'][()])) == 2000

    h5root.close()  # release the read handle before deleting the temp directory
    spike_trains.close()
    shutil.rmtree(buffer_dir)
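
# A minimal sketch for running the buffer test directly, assuming the
# in-memory STMemoryBuffer adaptor (the same one used by test_empty_spikes()
# above) is a valid adaptor class here; the parametrized tests require
# SONATA/CSV fixture files and are left to the pytest runner.
if __name__ == '__main__':
    test_single_proc(spike_train_buffer.STMemoryBuffer)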