def test_timestamps_property(self):
    """TimeSeriesReference.timestamps is computed correctly for both timing forms."""
    # Case 1: timestamps derived from starting_time and rate
    series = TimeSeries(name='test0', description='test0', data=np.arange(10),
                        unit='unit', starting_time=5.0, rate=0.1)
    ref = TimeSeriesReference(5, 4, series)
    np.testing.assert_array_equal(ref.timestamps, np.array([5.5, 5.6, 5.7, 5.8]))
    # Case 2: timestamps taken directly from an explicit timestamps array
    series = TimeSeries(name='test0', description='test0', data=np.arange(10),
                        unit='unit', timestamps=np.arange(10).astype(float))
    ref = TimeSeriesReference(5, 4, series)
    np.testing.assert_array_equal(ref.timestamps, np.array([5., 6., 7., 8.]))
def test_dataio_dci_data(self):
    """H5DataIO-wrapped DataChunkIterator: num_samples is -1 and values round-trip."""
    def make_generator():
        return (i for i in range(100))

    data = H5DataIO(DataChunkIterator(data=make_generator()))
    ts1 = TimeSeries('test_ts1', 'unit test test_DataIO', data, 'grams',
                     starting_time=0.0, rate=0.1)
    self.assertEqual(ts1.num_samples, -1)
    # Compare what the iterator yields against a fresh generator of the same values
    for observed, expected in zip(data, make_generator()):
        assert np.allclose(observed, expected)
def add_stimulus_timestamps(nwbfile, stimulus_timestamps, module_name='stimulus'):
    """Store stimulus timestamps in a processing module on the NWB file.

    The timestamps array doubles as both the data and the timestamps of the
    series. Returns the (mutated) nwbfile for chaining.
    """
    timestamps_series = TimeSeries(data=stimulus_timestamps,
                                   name='timestamps',
                                   timestamps=stimulus_timestamps,
                                   unit='s')
    module = ProcessingModule(module_name, 'Stimulus Times processing')
    nwbfile.add_processing_module(module)
    module.add_data_interface(timestamps_series)
    return nwbfile
def add_motion_correction(nwbfile, motion_correction):
    """Add x/y motion-correction traces to the NWB file.

    Timestamps are borrowed from the dF/F ROI response series so the motion
    traces stay aligned with the two-photon imaging frames.

    :param nwbfile: NWBFile that already contains a 'two_photon_imaging'
        processing module with a 'dff' interface
    :param motion_correction: table-like object with 'x' and 'y' columns
        (assumed pandas DataFrame — values are read via ``.values``)
    :return: the (mutated) nwbfile, for consistency with the other add_* helpers
    """
    twop_module = nwbfile.modules['two_photon_imaging']
    ophys_timestamps = (twop_module.get_data_interface('dff')
                        .roi_response_series['traces'].timestamps)
    motion_module = ProcessingModule('motion_correction',
                                     'Motion Correction processing module')
    # One TimeSeries per correction axis, sharing the imaging timestamps
    for axis in ('x', 'y'):
        motion_module.add_data_interface(TimeSeries(
            name=axis,
            data=motion_correction[axis].values,
            timestamps=ophys_timestamps,
            unit='pixels'
        ))
    nwbfile.add_processing_module(motion_module)
    # Return the file so callers can chain, matching add_licks/add_rewards/etc.
    return nwbfile
def add_rewards(nwbfile, rewards_df):
    """Add reward volume and autoreward flags to a 'rewards' processing module.

    The dataframe index must be named 'timestamps' and supplies the timing for
    both series. Returns the (mutated) nwbfile.
    """
    assert rewards_df.index.name == 'timestamps'
    volume_series = TimeSeries(
        name='volume',
        data=rewards_df.volume.values,
        timestamps=rewards_df.index.values,
        unit='ml'
    )
    # Share the volume series' timestamps rather than duplicating the array
    autoreward_series = TimeSeries(
        name='autorewarded',
        data=rewards_df.autorewarded.values,
        timestamps=volume_series.timestamps,
        unit=None
    )
    rewards_module = ProcessingModule('rewards', 'Licking behavior processing module')
    rewards_module.add_data_interface(volume_series)
    rewards_module.add_data_interface(autoreward_series)
    nwbfile.add_processing_module(rewards_module)
    return nwbfile
def test_timestamps_property_bad_reference(self):
    """Accessing timestamps on an out-of-range reference raises IndexError."""
    # Explicit-timestamps form: idx_start + count exceeds the data length
    ref = TimeSeriesReference(0, 12, TimeSeries(
        name='test0', description='test0', data=np.arange(10), unit='unit',
        timestamps=np.arange(10).astype(float)))
    with self.assertRaisesWith(
            IndexError,
            "'idx_start + count' out of range for timeseries 'test0'"):
        ref.timestamps
    # starting_time/rate form: same out-of-range failure
    ref = TimeSeriesReference(0, 12, TimeSeries(
        name='test0', description='test0', data=np.arange(10), unit='unit',
        starting_time=5.0, rate=0.1))
    with self.assertRaisesWith(
            IndexError,
            "'idx_start + count' out of range for timeseries 'test0'"):
        ref.timestamps
def test_dci_data_arr(self):
    """Array-chunk DataChunkIterator: num_samples warns and is None; data round-trips."""
    def make_generator():
        return (np.array([i, i + 1]) for i in range(100))

    data = DataChunkIterator(data=make_generator())
    ts1 = TimeSeries('test_ts1', data, 'grams', starting_time=0.0, rate=0.1)
    with self.assertWarns(UserWarning):
        self.assertIs(ts1.num_samples, None)
    for observed, expected in zip(data, make_generator()):
        assert np.allclose(observed, expected)
def test_show_index_series():
    """show_index_series should produce an ipywidgets Widget."""
    index_data = np.array([12, 14, 16, 18, 20, 22, 24, 26])
    base_series = TimeSeries(name='Index Series time data',
                             data=np.random.rand(800).reshape((8, 10, 10)),
                             rate=1.)
    index_series = IndexSeries(name='Sample Index Series',
                               data=index_data,
                               indexed_timeseries=base_series,
                               rate=1.)
    result = show_index_series(index_series, default_neurodata_vis_spec)
    assert isinstance(result, widgets.Widget)
def test_show_single_sweep_sequence():
    """show_single_sweep_sequence should return a matplotlib Figure."""
    device = Device(name="Axon Patch-Clamp")
    electrode = IntracellularElectrode(name="Patch Clamp", device=device,
                                       description="whole-cell")
    # Stimulus and response share the same shape and timing parameters
    stimulus = TimeSeries(
        name="test_timeseries",
        data=np.random.rand(160, 2),
        unit="m",
        starting_time=0.0,
        rate=1.0,
    )
    response = TimeSeries(
        name="test_timeseries",
        data=np.random.rand(160, 2),
        unit="m",
        starting_time=0.0,
        rate=1.0,
    )
    recordings = IntracellularRecordings()
    recordings.add_recording(
        electrode=electrode,
        stimulus_start_index=0,
        stimulus_index_count=100,
        stimulus=stimulus,
        response_start_index=0,
        response_index_count=100,
        response=response,
    )
    sweeps_table = Sweeps(intracellular_recordings_table=recordings)
    assert isinstance(show_single_sweep_sequence(sweeps_table), plt.Figure)
def convert_to_ts(self, tdt_stream, name, comments, description, unit="NA"):
    """Build an NWB TimeSeries from a TDT stream.

    The stream's data is transposed (``.T``) and its sampling frequency
    (``fs``) becomes the series rate.
    """
    samples = tdt_stream.data.T
    sampling_rate = tdt_stream.fs
    return TimeSeries(
        name=name,
        data=samples,
        rate=sampling_rate,
        comments=comments,
        description=description,
        unit=unit,
    )
def test_get_length1_valid_data(self):
    """Get data from a TimeSeriesReferenceVectorData with one element and valid data"""
    vector_data = TimeSeriesReferenceVectorData()
    ref = TimeSeriesReference(
        0, 5, TimeSeries(name='test', description='test', data=np.arange(10),
                         unit='unit', starting_time=5.0, rate=0.1))
    vector_data.append(ref)
    # Index access returns the reference itself
    self.assertTupleEqual(vector_data[0], ref)
    # Slice access wraps each element in the named-tuple type
    self.assertListEqual(vector_data[:], [
        TimeSeriesReferenceVectorData.TIME_SERIES_REFERENCE_TUPLE(*ref),
    ])
def setUp(self):
    """Build the source TimeSeries and file names for the modular-storage tests."""
    self.__manager = get_manager()
    self.start_time = datetime(1971, 1, 1, 12, tzinfo=tzutc())
    # 2x1000 samples with 1000 evenly spaced timestamps over [0, 1]
    self.timestamps = np.linspace(0, 1, 1000)
    self.data = np.arange(2000).reshape((2, 1000))
    self.container = TimeSeries(name='data_ts',
                                unit='V',
                                data=self.data,
                                timestamps=self.timestamps)
    self.data_filename = 'test_time_series_modular_data.nwb'
    self.link_filename = 'test_time_series_modular_link.nwb'
def add_licks(nwbfile, licks):
    """Record lick events (frame indices + timestamps) in a 'licking' module.

    Returns the (mutated) nwbfile.
    """
    lick_series = TimeSeries(
        name='licks',
        data=licks.frame.values,
        timestamps=licks.timestamps.values,
        description=('Timestamps and stimulus presentation '
                     'frame indices for lick events'),
        unit='N/A')
    # Add lick interface to nwb file, by way of a processing module:
    licking_module = ProcessingModule('licking', 'Licking behavior processing module')
    licking_module.add_data_interface(lick_series)
    nwbfile.add_processing_module(licking_module)
    return nwbfile
def add_licks(nwbfile, licks):
    """Wrap lick times in a BehavioralEvents interface inside a 'licking' module.

    Returns the (mutated) nwbfile.
    """
    lick_events = TimeSeries(data=licks.time.values,
                             name='timestamps',
                             timestamps=licks.time.values,
                             unit='s')
    # Add lick event timeseries to lick interface:
    behavioral_events = BehavioralEvents([lick_events], 'licks')
    # Add lick interface to nwb file, by way of a processing module:
    licking_module = ProcessingModule('licking', 'Licking behavior processing module')
    licking_module.add_data_interface(behavioral_events)
    nwbfile.add_processing_module(licking_module)
    return nwbfile
def test_dci_data(self):
    """Generator-backed DataChunkIterator: num_samples warns (no __len__) and is None."""
    def make_generator():
        return (i for i in range(100))

    data = DataChunkIterator(data=make_generator())
    ts1 = TimeSeries('test_ts1', data, 'grams', starting_time=0.0, rate=0.1)
    with self.assertWarnsRegex(
            UserWarning,
            r'The data attribute on this TimeSeries \(named: test_ts1\) has no '
            '__len__'):
        self.assertIs(ts1.num_samples, None)
    for observed, expected in zip(data, make_generator()):
        assert np.allclose(observed, expected)
def test_dataio_dci_data(self):
    """H5DataIO over DataChunkIterator: num_samples warns (unreadable __len__) and is None."""
    def make_generator():
        return (i for i in range(100))

    data = H5DataIO(DataChunkIterator(data=make_generator()))
    ts1 = TimeSeries('test_ts1', data, 'grams', starting_time=0.0, rate=0.1)
    with self.assertWarnsWith(
            UserWarning,
            'The data attribute on this TimeSeries (named: test_ts1) has a '
            '__len__, but it cannot be read'):
        self.assertIs(ts1.num_samples, None)
    for observed, expected in zip(data, make_generator()):
        assert np.allclose(observed, expected)
def copy_trials(nwbfile_in, nwbfile_out, stub=STUB_percentage):
    """Copy the trials table from one NWB file into another.

    Custom (non-default) columns are re-created on the destination table, then
    every row is copied. Any attached 'timeseries' value is rebuilt with its
    data and timestamps truncated to the leading ``stub`` fraction of samples.

    Bug fixes vs. the original:
    - ``ts_in.fields`` is a dict; iterating it alone yields only keys, so the
      kwargs must come from ``ts_in.fields.items()``.
    - ``TimeSeries(ts_kwargs)`` passed the dict positionally as ``name``; the
      kwargs must be expanded with ``TimeSeries(**ts_kwargs)``.

    :param nwbfile_in: source NWBFile (its ``trials`` table may be None)
    :param nwbfile_out: destination NWBFile (``trials`` must already exist)
    :param stub: fraction (0-1) of each timeseries to keep
    """
    default_trial_columns = ['start_time', 'stop_time', 'tags', 'timeseries']
    trials_table = nwbfile_in.trials
    if trials_table is None:
        return
    # Re-create any user-defined columns on the destination table.
    for custom_column in set(trials_table.colnames) - set(default_trial_columns):
        nwbfile_out.add_trial_column(
            name=trials_table[custom_column].name,
            description=trials_table[custom_column].description)
    for trial_no in range(len(trials_table)):
        row = {}
        for colname in trials_table.colnames:
            if colname == 'timeseries':
                ts_in = trials_table[colname][trial_no]
                # Copy every field except data/timestamps, which are stubbed.
                ts_kwargs = {key: value for key, value in ts_in.fields.items()
                             if key not in ['data', 'timestamps']}
                stub_length = np.round(ts_in.data.shape[0] * stub).astype('int')
                ts_kwargs.update(data=ts_in.data[:stub_length],
                                 timestamps=ts_in.timestamps[:stub_length])
                row.update(timeseries=TimeSeries(**ts_kwargs))
            else:
                row.update({colname: trials_table[colname][trial_no]})
        nwbfile_out.trials.add_row(**row)
def setUp(self):
    """Prepare the source TimeSeries, absolute file paths, and clear IO handles."""
    self.start_time = datetime(1971, 1, 1, 12, tzinfo=tzutc())
    # 2x1000 samples with 1000 evenly spaced timestamps over [0, 1]
    self.timestamps = np.linspace(0, 1, 1000)
    self.data = np.arange(2000).reshape((2, 1000))
    self.container = TimeSeries(name='data_ts',
                                unit='V',
                                data=self.data,
                                timestamps=self.timestamps)
    cwd = os.getcwd()
    self.data_filename = os.path.join(cwd, 'test_time_series_modular_data.nwb')
    self.link_filename = os.path.join(cwd, 'test_time_series_modular_link.nwb')
    # Handles populated during the test; cleared here so tearDown can close them.
    self.read_container = None
    self.link_read_io = None
    self.data_read_io = None
def roundtripContainer(self):
    # Round-trip test: write a data file, then a second "link file" whose
    # TimeSeries data/timestamps are external links into the first file,
    # and finally read the link file back.
    # create and write data file
    data_file = NWBFile(
        session_description='a test file',
        identifier='data_file',
        session_start_time=self.start_time
    )
    data_file.add_acquisition(self.container)
    with HDF5IO(self.data_filename, 'w', manager=get_manager()) as data_write_io:
        data_write_io.write(data_file)
    # read data file
    # NOTE(review): the data file must stay open while the link file is
    # written, since the link targets live in it — hence the nested withs.
    with HDF5IO(self.data_filename, 'r', manager=get_manager()) as self.data_read_io:
        data_file_obt = self.data_read_io.read()
        # write "link file" with timeseries.data that is an external link to the timeseries in "data file"
        # also link timeseries.timestamps.data to the timeseries.timestamps in "data file"
        with HDF5IO(self.link_filename, 'w', manager=get_manager()) as link_write_io:
            link_file = NWBFile(
                session_description='a test file',
                identifier='link_file',
                session_start_time=self.start_time
            )
            self.link_container = TimeSeries(
                name='test_mod_ts',
                unit='V',
                data=data_file_obt.get_acquisition('data_ts'),  # test direct link
                timestamps=H5DataIO(
                    data=data_file_obt.get_acquisition('data_ts').timestamps,
                    link_data=True  # test with setting link data
                )
            )
            link_file.add_acquisition(self.link_container)
            link_write_io.write(link_file)
    # note that self.link_container contains a link to a dataset that is now closed
    # read the link file (left open; tearDown is expected to close link_read_io)
    self.link_read_io = HDF5IO(self.link_filename, 'r', manager=get_manager())
    self.read_nwbfile = self.link_read_io.read()
    return self.getContainer(self.read_nwbfile)
def test_get_length5_valid_data(self):
    """Get data from a TimeSeriesReferenceVectorData with 5 elements"""
    vector_data = TimeSeriesReferenceVectorData()
    num_values = 5
    refs = [
        TimeSeriesReference(
            0, 5, TimeSeries(name='test' + str(i), description='test',
                             data=np.arange(10), unit='unit',
                             starting_time=5.0, rate=0.1))
        for i in range(num_values)
    ]
    for ref in refs:
        vector_data.append(ref)
    tuple_cls = TimeSeriesReferenceVectorData.TIME_SERIES_REFERENCE_TUPLE
    # Single element selection: both integer indexing and length-1 slices
    for i in range(num_values):
        self.assertTupleEqual(vector_data[i], refs[i])
        sliced = vector_data[i:i + 1]
        self.assertTupleEqual(sliced[0], tuple_cls(*refs[i]))
    # Multi element selection
    selected = vector_data[0:2]
    self.assertTupleEqual(selected[0], tuple_cls(*refs[0]))
    self.assertTupleEqual(selected[1], tuple_cls(*refs[1]))
def test_cache_spec(self):
    """Writing with cache_spec=True stores the 'core' namespace, which loads back identically."""
    self.test_temp_file = tempfile.NamedTemporaryFile()
    # On Windows h5py cannot truncate an open file in write mode.
    # The temp file will be closed before h5py truncates it
    # and will be removed during the tearDown step.
    self.test_temp_file.close()
    self.io = NWBHDF5IO(self.test_temp_file.name)
    # Build a minimal file containing one acquisition TimeSeries
    start_time = datetime(2017, 4, 3, 11, 0, 0)
    create_date = datetime(2017, 4, 15, 12, 0, 0)
    data = np.arange(1000).reshape((100, 10))
    timestamps = np.arange(100)
    nwbfile1 = NWBFile(source='PyNWB tutorial',
                       session_description='demonstrate external files',
                       identifier='NWBE1',
                       session_start_time=start_time,
                       file_create_date=create_date)
    test_ts1 = TimeSeries(name='test_timeseries',
                          source='PyNWB tutorial',
                          data=data,
                          unit='SIunit',
                          timestamps=timestamps)
    nwbfile1.add_acquisition(test_ts1)
    # Write with the spec cached into the file, then re-load the namespaces
    self.io.write(nwbfile1, cache_spec=True)
    self.io.close()
    ns_catalog = NamespaceCatalog(group_spec_cls=NWBGroupSpec,
                                  dataset_spec_cls=NWBDatasetSpec,
                                  spec_namespace_cls=NWBNamespace)
    NWBHDF5IO.load_namespaces(ns_catalog, self.test_temp_file.name)
    self.assertEqual(ns_catalog.namespaces, ('core', ))
    # The cached types must match those known to the writing manager
    source_types = self.__get_types(self.io.manager.namespace_catalog)
    read_types = self.__get_types(ns_catalog)
    self.assertSetEqual(source_types, read_types)
def test_init(self):
    """CorrectedImageStack stores the corrected/original image series and the xy translation."""
    is1 = ImageSeries(name='is1', data=list(), unit='unit',
                      external_file=['external_file'], starting_frame=[1, 2, 3],
                      format='tiff', timestamps=list())
    is2 = ImageSeries(name='is2', data=list(), unit='unit',
                      external_file=['external_file'], starting_frame=[1, 2, 3],
                      format='tiff', timestamps=list())
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float is the documented replacement and yields the same float64 dtype.
    tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)
    ts = TimeSeries("test_ts", list(range(len(tstamps))), 'unit', timestamps=tstamps)
    cis = CorrectedImageStack(is1, is2, ts)
    self.assertEqual(cis.corrected, is1)
    self.assertEqual(cis.original, is2)
    self.assertEqual(cis.xy_translation, ts)
def test_init_timestampslink_set(self):
    """A fresh TimeSeries starts with an empty timestamp_link set."""
    series = TimeSeries('test_ts', list(), 'unit', timestamps=list())
    self.assertIsInstance(series.timestamp_link, set)
    self.assertEqual(len(series.timestamp_link), 0)
def test_getitem(self):
    """ProcessingModule.__getitem__ returns the stored container by name."""
    series = TimeSeries('test_ts', [0, 1, 2, 3, 4, 5], 'grams',
                        timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    self.pm.add(series)
    self.assertIs(self.pm['test_ts'], series)
def test_init_no_parent(self):
    """A standalone TimeSeries keeps its name and has no parent."""
    series = TimeSeries('test_ts', list(), 'unit', timestamps=list())
    self.assertEqual(series.name, 'test_ts')
    self.assertIsNone(series.parent)
def test_no_starting_time(self):
    """If no starting_time is given, 0.0 is assumed."""
    series = TimeSeries('test_ts1', rate=0.1)
    self.assertEqual(series.starting_time, 0.0)
def test_no_time(self):
    """Omitting both timestamps and rate raises TypeError."""
    with self.assertRaisesWith(
            TypeError,
            "either 'timestamps' or 'rate' must be specified"):
        TimeSeries('test_ts2', [10, 11, 12, 13, 14, 15], 'grams')
def test_add_data_interface(self):
    """Adding a TimeSeries registers it under its own name in pm.containers."""
    series = TimeSeries('test_ts', [0, 1, 2, 3, 4, 5], 'grams',
                        timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    self.pm.add(series)
    self.assertIn(series.name, self.pm.containers)
    self.assertIs(series, self.pm.containers[series.name])
def test_nodata(self):
    """num_samples is None (with a UserWarning) when no data was provided."""
    series = TimeSeries('test_ts1', starting_time=0.0, rate=0.1)
    with self.assertWarns(UserWarning):
        self.assertIs(series.num_samples, None)
def test_timestamps_timeseries(self):
    """A TimeSeries may serve as the timestamps source of another series."""
    base = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5], 'grams',
                      timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    linked = TimeSeries('test_ts2', [10, 11, 12, 13, 14, 15], 'grams',
                        timestamps=base)
    self.assertEqual(linked.timestamps, [0.0, 0.1, 0.2, 0.3, 0.4, 0.5])