def test_init(self):
    """Construct a CorrectedImageStack from two ImageSeries and a
    TimeSeries and check the attributes are wired up (positional API)."""
    is1 = ImageSeries(name='is1', data=np.ones((2, 2, 2)), unit='unit',
                      external_file=['external_file'], starting_frame=[1, 2, 3],
                      format='tiff', timestamps=[1., 2.])
    is2 = ImageSeries(name='is2', data=np.ones((2, 2, 2)), unit='unit',
                      external_file=['external_file'], starting_frame=[1, 2, 3],
                      format='tiff', timestamps=[1., 2.])
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the explicit equivalent (and what the sibling test uses).
    tstamps = np.arange(1.0, 100.0, 0.1, dtype=np.float64)
    ts = TimeSeries("test_ts", list(range(len(tstamps))), 'unit', timestamps=tstamps)
    cis = CorrectedImageStack(is1, is2, ts)
    ProcessingModule('name', 'description').add(cis)
    self.assertEqual(cis.corrected, is1)
    self.assertEqual(cis.original, is2)
    self.assertEqual(cis.xy_translation, ts)
def test_init(self):
    """Build a CorrectedImageStack via keyword arguments and verify that
    its corrected, original, and xy_translation components round-trip."""
    series_kwargs = dict(unit='unit', external_file=['external_file'],
                         starting_frame=[1, 2, 3], format='tiff',
                         timestamps=[1., 2.])
    corrected_series = ImageSeries(name='corrected', data=np.ones((2, 2, 2)),
                                   **series_kwargs)
    original_series = ImageSeries(name='is2', **series_kwargs)
    tstamps = np.arange(1.0, 100.0, 0.1, dtype=np.float64)
    translation = TimeSeries(name='xy_translation',
                             data=list(range(len(tstamps))),
                             unit='unit',
                             timestamps=tstamps)
    stack = CorrectedImageStack(corrected=corrected_series,
                                original=original_series,
                                xy_translation=translation)
    self.assertEqual(stack.corrected, corrected_series)
    self.assertEqual(stack.original, original_series)
    self.assertEqual(stack.xy_translation, translation)
def add_motion_correction(nwbfile, expt):
    """Store motion-correction output for *expt* on *nwbfile* under an
    'imaging' processing module."""
    # Per-frame translation fetched from the experiment object; presumably a
    # TimeSeries-like object — confirm against get_motion_correction's return.
    data = get_motion_correction(expt)
    # NOTE(review): `corrected` is passed a raw ndarray and `original` is
    # omitted entirely; pynwb's CorrectedImageStack documents ImageSeries for
    # both — verify this constructor call is accepted by the pynwb version in use.
    cis = CorrectedImageStack(corrected=np.zeros((1, 1, 1)), xy_translation=data)
    imaging_mod = nwbfile.create_processing_module('imaging', 'imaging processing')
    # add_container is the legacy spelling of ProcessingModule.add in older pynwb.
    imaging_mod.add_container(cis)
def setUpContainer(self):
    """Return the test MotionCorrection container to read/write."""
    self.device, self.optical_channel, self.imaging_plane = make_imaging_plane()
    self.two_photon_series = TwoPhotonSeries(
        name='TwoPhotonSeries',
        data=np.ones((1000, 100, 100)),
        imaging_plane=self.imaging_plane,
        rate=1.0,
        unit='normalized amplitude',
    )
    # Motion-corrected version of the movie.
    corrected_series = ImageSeries(
        name='corrected',
        data=np.ones((1000, 100, 100)),
        unit='na',
        format='raw',
        starting_time=0.0,
        rate=1.0,
    )
    # Per-frame (x, y) shifts applied during correction.
    shift_series = TimeSeries(
        name='xy_translation',
        data=np.ones((1000, 2)),
        unit='pixels',
        starting_time=0.0,
        rate=1.0,
    )
    stack = CorrectedImageStack(
        corrected=corrected_series,
        original=self.two_photon_series,
        xy_translation=shift_series,
    )
    return MotionCorrection(corrected_image_stacks=[stack])
def test_init(self):
    """MotionCorrection should index its stacks by their default name."""
    plane = create_imaging_plane()
    original_series = TwoPhotonSeries(
        name='TwoPhotonSeries1',
        data=np.ones((1000, 100, 100)),
        imaging_plane=plane,
        rate=1.0,
        unit='normalized amplitude',
    )
    corrected_series = ImageSeries(
        name='corrected',
        data=np.ones((1000, 100, 100)),
        unit='na',
        format='raw',
        starting_time=0.0,
        rate=1.0,
    )
    shift_series = TimeSeries(
        name='xy_translation',
        data=np.ones((1000, 2)),
        unit='pixels',
        starting_time=0.0,
        rate=1.0,
    )
    stack = CorrectedImageStack(
        corrected=corrected_series,
        original=original_series,
        xy_translation=shift_series,
    )
    container = MotionCorrection(corrected_image_stacks=[stack])
    # The stack was built with the default name, so it is keyed accordingly.
    self.assertEqual(container.corrected_image_stacks['CorrectedImageStack'], stack)
def setUpContainer(self):
    """Return the test CorrectedImageStack to read/write."""
    # Fix: the original had a stray trailing comma after np.ones((2, 2, 2)),
    # which bound `data` to a 1-tuple containing the array rather than the
    # array itself.
    data = np.ones((2, 2, 2))
    timestamps = [1., 2.]
    corrected_is = ImageSeries(name='corrected', data=data, unit='unit',
                               external_file=['external_file'],
                               starting_frame=[1, 2, 3], format='tiff',
                               timestamps=timestamps)
    # Kept on self so the read/write round-trip test can compare it later.
    self.original_is = ImageSeries(name='original_is', data=data, unit='unit',
                                   external_file=['external_file'],
                                   starting_frame=[1, 2, 3], format='tiff',
                                   timestamps=timestamps)
    tstamps = [1., 2., 3.]
    ts = TimeSeries(name='xy_translation', data=list(range(len(tstamps))),
                    unit='unit', timestamps=tstamps)
    return CorrectedImageStack(corrected=corrected_is,
                               original=self.original_is,
                               xy_translation=ts)
def test_init(self):
    """Construct a CorrectedImageStack (legacy 'source'-based pynwb API)
    and check that its source and components are stored."""
    is1 = ImageSeries(name='is1', source='a hypothetical source', data=list(),
                      unit='unit', external_file=['external_file'],
                      starting_frame=[1, 2, 3], format='tiff', timestamps=list())
    is2 = ImageSeries(name='is2', source='a hypothetical source', data=list(),
                      unit='unit', external_file=['external_file'],
                      starting_frame=[1, 2, 3], format='tiff', timestamps=list())
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the explicit equivalent dtype.
    tstamps = np.arange(1.0, 100.0, 0.1, dtype=np.float64)
    ts = TimeSeries("test_ts", "a hypothetical source",
                    list(range(len(tstamps))), 'unit', timestamps=tstamps)
    cis = CorrectedImageStack("CorrectedImageStackConstructor", is1, is2, ts)
    self.assertEqual(cis.source, "CorrectedImageStackConstructor")
    self.assertEqual(cis.corrected, is1)
    self.assertEqual(cis.original, is2)
    self.assertEqual(cis.xy_translation, ts)
def cicada_add_corrected_image_stack(self, corrected=None, original=None, xy_translation=None):
    """Create a CorrectedImageStack and register it on the motion-correction module.

    Thin wrapper around ``pynwb.ophys.CorrectedImageStack(corrected, original,
    xy_translation, name='CorrectedImageStack')``.

    Args:
        corrected: motion-corrected ImageSeries.
        original: original (uncorrected) ImageSeries.
        xy_translation: TimeSeries of the per-frame x,y shifts applied.
    """
    # Fix: dropped the unused local `name_module` ("corrected_image_stack_"),
    # a leftover metadata-module prefix that was never read.
    self.corrected_image_stack = CorrectedImageStack(
        corrected=corrected,
        original=original,
        xy_translation=xy_translation,
        name="CorrectedImageStack")
    self.motion_correction.add_corrected_image_stack(self.corrected_image_stack)
unit='na', format='raw', starting_time=0.0, rate=1.0)  # tail of an ImageSeries(...) call begun above this chunk

# TimeSeries of per-frame (x, y) shifts produced by motion correction.
xy_translation = TimeSeries(
    name='xy_translation',
    data=np.ones((1000, 2)),
    unit='pixels',
    starting_time=0.0,
    rate=1.0,
)

# Bundle the corrected movie, the original movie, and the shifts together.
corrected_image_stack = CorrectedImageStack(
    corrected=corrected,
    original=image_series1,
    xy_translation=xy_translation,
)

motion_correction = MotionCorrection(corrected_image_stacks=[corrected_image_stack])

####################
# We will create a :py:class:`~pynwb.base.ProcessingModule` named "ophys" to store optical
# physiology data and add the motion correction data to the :py:class:`~pynwb.file.NWBFile`.
#

ophys_module = nwbfile.create_processing_module(
    name='ophys',
    description='optical physiology processed data')

ophys_module.add(motion_correction)
def convert(self, **kwargs):
    """Convert the data and add to the nwb_file

    Builds the motion-corrected calcium-imaging movie (and, when file names
    are supplied, the original movie and the per-frame x/y shifts) as
    TwoPhotonSeries / CorrectedImageStack acquisitions on ``self.nwb_file``.

    Args:
        **kwargs: arbitrary arguments; the keys read here are 'format',
            'motion_corrected_file_name', 'yaml_file_name' (all required),
            plus optional 'original_movie_file_name',
            'xy_translation_file_name' and 'ci_sampling_rate'.

    Raises:
        Exception: when a required kwarg or a required YAML metadata key
            is missing or invalid.
    """
    super().convert(**kwargs)

    # ### setting parameters ####
    # "external" links the movie file on disk; "tiff" loads frames in memory.
    formats_implemented = ["external", "tiff"]
    if not kwargs.get("format"):
        raise Exception(f"'format' argument should be pass to convert function in class {self.__class__.__name__}")
    elif kwargs["format"] not in formats_implemented:
        raise Exception(f"'format' argument should have one of these values {formats_implemented} "
                        f"for the convert function in class {self.__class__.__name__}")
    movie_format = kwargs["format"]
    if not kwargs.get("motion_corrected_file_name"):
        raise Exception(f"'motion_corrected_file_name' attribute should be pass to convert "
                        f"function in class {self.__class__.__name__}")
    motion_corrected_file_name = kwargs["motion_corrected_file_name"]
    # The original movie and the shift file are optional.
    if "original_movie_file_name" in kwargs:
        original_movie_file_name = kwargs["original_movie_file_name"]
    else:
        original_movie_file_name = None
    if "xy_translation_file_name" in kwargs:
        xy_translation_file_name = kwargs["xy_translation_file_name"]
    else:
        xy_translation_file_name = None

    # Open the YAML metadata file if it exists, then load all of it into a dict.
    if ("yaml_file_name" in kwargs) and kwargs["yaml_file_name"] is not None:
        with open(kwargs["yaml_file_name"], 'r') as stream:
            yaml_data = yaml.safe_load(stream)
    else:
        raise Exception(f"'yaml_file_name' attribute should be pass to convert "
                        f"function in class {self.__class__.__name__}")

    # A calcium-imaging rate has to be given, either through the YAML file or
    # as an argument; self.ci_sampling_rate can be obtained by the abf_converter.
    if "imaging_rate" in yaml_data:
        self.ci_sampling_rate = yaml_data["imaging_rate"]
    elif "ci_sampling_rate" in kwargs:
        self.ci_sampling_rate = kwargs["ci_sampling_rate"]
    else:
        raise Exception(f"No 'imaging_rate' provided for movie {motion_corrected_file_name} in the yaml file "
                        f"{kwargs['yaml_file_name']} or throught argument 'ci_sampling_rate' to function convert() "
                        f"of the class {self.__class__.__name__}")
    # The remaining imaging-plane metadata must all come from the YAML file.
    if "indicator" in yaml_data:
        indicator = yaml_data["indicator"]
    else:
        raise Exception(f"No 'indicator' provided for movie {motion_corrected_file_name} in the yaml file "
                        f"{kwargs['yaml_file_name']}")
    if "excitation_lambda" in yaml_data:
        excitation_lambda = yaml_data["excitation_lambda"]
    else:
        raise Exception(f"No 'excitation_lambda' provided for movie {motion_corrected_file_name} in the yaml file "
                        f"{kwargs['yaml_file_name']}")
    if "emission_lambda" in yaml_data:
        emission_lambda = yaml_data["emission_lambda"]
    else:
        raise Exception(f"No 'emission_lambda' provided for movie {motion_corrected_file_name} in the yaml file "
                        f"{kwargs['yaml_file_name']}")
    if "image_plane_location" in yaml_data:
        image_plane_location = yaml_data["image_plane_location"]
    else:
        raise Exception(
            f"No 'image_plane_location' provided for movie {motion_corrected_file_name} in the yaml file "
            f"{kwargs['yaml_file_name']}")

    # If the recording was paused, record how many frames to pad at each pause
    # point in self.frames_to_add (assumed to be a dict initialized elsewhere —
    # TODO confirm). The broad excepts make this a silent best-effort step.
    try:
        # AttributeError is caught when self.nwb_file has no intervals — presumably.
        if 'ci_recording_on_pause' in self.nwb_file.intervals:
            pause_intervals = self.nwb_file.intervals['ci_recording_on_pause']
            pause_intervals_df = pause_intervals.to_dataframe()
            start_times = pause_intervals_df.loc[:, "start_time"]
            stop_times = pause_intervals_df.loc[:, "stop_time"]
            try:
                ci_frames_time_series = self.nwb_file.get_acquisition("ci_frames")
                # Indices of frames actually acquired (truthy entries of the series).
                ci_frames = np.where(ci_frames_time_series.data)[0]
                ci_frames_timestamps = ci_frames_time_series.timestamps[ci_frames]
                for i, start_time in enumerate(start_times):
                    # First acquired frame at/after the pause start.
                    frame_index = np.searchsorted(a=ci_frames_timestamps, v=start_time)
                    # Pause duration converted to a frame count at the CI rate.
                    n_frames_to_add = (stop_times[i] - start_time) * self.ci_sampling_rate
                    self.frames_to_add[frame_index] = int(n_frames_to_add)
            except KeyError:
                pass
    except AttributeError:
        pass
    # ### end setting parameters ####

    device = Device('2P_device')
    self.nwb_file.add_device(device)
    optical_channel = OpticalChannel('my_optchan', 'description', emission_lambda)
    imaging_plane = self.nwb_file.create_imaging_plane(name='my_imgpln',
                                                       optical_channel=optical_channel,
                                                       description='a very interesting part of the brain',
                                                       device=device,
                                                       excitation_lambda=excitation_lambda,
                                                       imaging_rate=float(self.ci_sampling_rate),
                                                       indicator=indicator,
                                                       location=image_plane_location)

    if movie_format != "external":
        # In-memory path: load the tiff stack and embed the data in the series.
        tiff_movie = self.load_tiff_movie_in_memory(motion_corrected_file_name)
        dim_y, dim_x = tiff_movie.shape[1:]
        n_frames = tiff_movie.shape[0]
        motion_corrected_img_series = TwoPhotonSeries(name='motion_corrected_ci_movie',
                                                      dimension=[dim_x, dim_y],
                                                      data=tiff_movie,
                                                      imaging_plane=imaging_plane,
                                                      starting_frame=[0], format=movie_format,
                                                      rate=self.ci_sampling_rate)
        if original_movie_file_name is not None:
            original_tiff_movie = self.load_tiff_movie_in_memory(original_movie_file_name)
            dim_y, dim_x = original_tiff_movie.shape[1:]
            original_img_series = TwoPhotonSeries(name='original_ci_movie',
                                                  dimension=[dim_x, dim_y],
                                                  data=original_tiff_movie,
                                                  imaging_plane=imaging_plane,
                                                  starting_frame=[0], format=movie_format,
                                                  rate=float(self.ci_sampling_rate))
    else:
        # External path: only read dimensions/frame count; the file stays on disk.
        im = PIL.Image.open(motion_corrected_file_name)
        n_frames = len(list(ImageSequence.Iterator(im)))
        dim_y, dim_x = np.array(im).shape
        motion_corrected_img_series = TwoPhotonSeries(name='motion_corrected_ci_movie',
                                                      dimension=[dim_x, dim_y],
                                                      external_file=[motion_corrected_file_name],
                                                      imaging_plane=imaging_plane,
                                                      starting_frame=[0], format=movie_format,
                                                      rate=float(self.ci_sampling_rate))
        if original_movie_file_name is not None:
            im = PIL.Image.open(original_movie_file_name)
            dim_y, dim_x = np.array(im).shape
            original_img_series = TwoPhotonSeries(name='original_ci_movie',
                                                  dimension=[dim_x, dim_y],
                                                  external_file=[original_movie_file_name],
                                                  imaging_plane=imaging_plane,
                                                  starting_frame=[0], format=movie_format,
                                                  rate=float(self.ci_sampling_rate))
    self.nwb_file.add_acquisition(motion_corrected_img_series)
    if original_movie_file_name is not None:
        self.nwb_file.add_acquisition(original_img_series)

    if xy_translation_file_name is not None:
        if xy_translation_file_name.endswith(".mat"):
            mvt_x_y = hdf5storage.loadmat(os.path.join(xy_translation_file_name))
            x_shifts = mvt_x_y['xshifts'][0]
            y_shifts = mvt_x_y['yshifts'][0]
        elif xy_translation_file_name.endswith(".npy"):
            # NOTE(review): a suite2p-style 'ops' .npy holds a pickled dict;
            # np.load without allow_pickle=True raises on NumPy >= 1.16.3 — verify.
            ops = np.load(os.path.join(xy_translation_file_name))
            ops = ops.item()
            x_shifts = ops['xoff']
            y_shifts = ops['yoff']
        # NOTE(review): x_shifts/y_shifts are unbound (NameError) if the file
        # has neither a .mat nor a .npy extension — confirm callers guarantee this.
        xy_translation = np.zeros((n_frames, 2), dtype="int16")
        frame_index = 0
        for frame in np.arange(len(x_shifts)):
            xy_translation[frame_index, 0] = x_shifts[frame]
            xy_translation[frame_index, 1] = y_shifts[frame]
            # Adding frames is necessary in case the movie is a concatenation
            # of movies, for example.
            # NOTE(review): frame_index only advances via frames_to_add and is
            # never incremented per iteration, so successive shifts overwrite
            # row 0 — looks like a `frame_index += 1` is missing; confirm intent.
            if frame in self.frames_to_add:
                frame_index += self.frames_to_add[frame]
        # NOTE(review): TimeSeries is created without unit/timestamps/rate;
        # pynwb normally requires a time base — verify against the pynwb version.
        xy_translation_time_series = TimeSeries(name="xy_translation", data=xy_translation)
        # NOTE(review): original_img_series is unbound here when
        # original_movie_file_name is None — potential NameError; confirm.
        corrected_image_stack = CorrectedImageStack(name="CorrectedImageStack",
                                                    corrected=motion_corrected_img_series,
                                                    original=original_img_series,
                                                    xy_translation=xy_translation_time_series)
        self.nwb_file.add_acquisition(corrected_image_stack)