def add_dff_traces(nwbfile, dff_traces, ophys_timestamps):
    """Attach dF/F traces to the 'ophys' processing module of *nwbfile*.

    Re-indexes *dff_traces* by cell_roi_id, builds an ROI table region over
    the cell specimen table, and stores the traces in a new DfOverF
    interface. Returns the same *nwbfile*.
    """
    dff_traces = dff_traces.reset_index().set_index('cell_roi_id')[['dff']]
    ophys_module = nwbfile.processing['ophys']

    # One row per ROI here; transposed below because NWB stores the series
    # as timepoints x rois.
    per_roi_traces = [
        dff_traces.loc[roi_id].dff for roi_id in dff_traces.index.values
    ]
    trace_data = np.array(per_roi_traces)

    segmentations = (nwbfile.processing['ophys']
                     .data_interfaces['image_segmentation']
                     .plane_segmentations)
    cell_specimen_table = segmentations['cell_specimen_table']
    roi_table_region = cell_specimen_table.create_roi_table_region(
        description="segmented cells labeled by cell_specimen_id",
        region=slice(len(dff_traces)))

    # Create/Add dff modules and interfaces:
    assert dff_traces.index.name == 'cell_roi_id'
    dff_interface = DfOverF(name='dff')
    ophys_module.add_data_interface(dff_interface)

    dff_interface.create_roi_response_series(
        name='traces',
        data=trace_data.T,  # stored as timepoints x rois
        unit='NA',
        rois=roi_table_region,
        timestamps=ophys_timestamps)

    return nwbfile
def add_dff_traces(nwbfile, dff_traces, ophys_timestamps):
    """Write dF/F traces into the 'two_photon_imaging' module of *nwbfile*.

    Re-indexes *dff_traces* by cell_roi_id, builds an ROI table region over
    the cell specimen table, and adds the traces (one row per ROI) through a
    new DfOverF interface. Returns the same *nwbfile*.
    """
    dff_traces = dff_traces.reset_index().set_index('cell_roi_id')[['dff']]
    twop_module = nwbfile.modules['two_photon_imaging']

    # One row per ROI, in the order of the cell_roi_id index.
    data = np.array(
        [dff_traces.loc[roi_id].dff for roi_id in dff_traces.index.values])

    plane_segmentations = (nwbfile.modules['two_photon_imaging']
                           .data_interfaces['image_segmentation']
                           .plane_segmentations)
    cell_specimen_table = plane_segmentations['cell_specimen_table']
    roi_table_region = cell_specimen_table.create_roi_table_region(
        description="segmented cells labeled by cell_specimen_id",
        region=slice(len(dff_traces)))

    # Create/Add dff modules and interfaces:
    assert dff_traces.index.name == 'cell_roi_id'
    dff_interface = DfOverF(name='dff')
    twop_module.add_data_interface(dff_interface)

    dff_interface.create_roi_response_series(
        name='traces',
        data=data,
        unit='NA',
        rois=roi_table_region,
        timestamps=ophys_timestamps)

    return nwbfile
def add_dff(module, expt, rt_region):
    """Add a DfOverF interface holding the experiment's imaging traces.

    The series sampling rate is derived from the experiment's frame period.
    """
    sampling_rate = 1 / expt.frame_period()
    signals = expt.imagingData(dFOverF=None)
    dff_container = DfOverF(name='DFF')
    dff_container.create_roi_response_series(
        name='DFF',
        data=signals.squeeze(),
        rate=sampling_rate,
        unit='NA',
        rois=rt_region)
    module.add_data_interface(dff_container)
def cicada_create_DfOverF(self):
    """Create an empty DfOverF container and attach it to the processing
    module.

    Mirrors ``pynwb.ophys.DfOverF(roi_response_series={}, name='DfOverF')``;
    response series are added to ``self.DfOverF`` afterwards.
    """
    # Fixed: removed the unused local ``name_module`` and the needless
    # intermediate empty dict.
    self.DfOverF = DfOverF(roi_response_series={}, name="DfOverF")
    self.mod.add_data_interface(self.DfOverF)
def test_init(self):
    """DfOverF should register a RoiResponseSeries under its name."""
    plane_seg = CreatePlaneSegmentation()
    region = plane_seg.create_roi_table_region('the second ROI', region=[1])
    series = RoiResponseSeries('test_ts', [], region,
                               unit='unit', timestamps=[])
    dff = DfOverF(series)
    self.assertEqual(dff.roi_response_series['test_ts'], series)
def test_init(self):
    """Constructing DfOverF from a series makes it retrievable by name."""
    plane_seg = create_plane_segmentation()
    region = plane_seg.create_roi_table_region(description='the second ROI',
                                               region=[1])
    series = RoiResponseSeries(name='test_ts',
                               data=[1, 2, 3],
                               rois=region,
                               unit='unit',
                               timestamps=[0.1, 0.2, 0.3])
    dff = DfOverF(series)
    self.assertEqual(dff.roi_response_series['test_ts'], series)
def test_init(self):
    """DfOverF should expose its source and the wrapped response series."""
    plane_seg = CreatePlaneSegmentation()
    img_seg = ImageSegmentation('test source', plane_seg, name='test_iS')
    series = RoiResponseSeries('test_ts', 'a hypothetical source', [],
                               'unit', ['name1'], img_seg, timestamps=[])
    dff = DfOverF('test_dof', series)
    self.assertEqual(dff.source, 'test_dof')
    self.assertEqual(dff.roi_response_series, series)
def test_init(self):
    """DfOverF should keep its source and index the series by name."""
    plane_seg = CreatePlaneSegmentation()
    region = plane_seg.create_roi_table_region([1], 'the second ROI')
    series = RoiResponseSeries('test_ts', 'a hypothetical source', [],
                               'unit', region, timestamps=[])
    dff = DfOverF('test_dof', series)
    self.assertEqual(dff.source, 'test_dof')
    self.assertEqual(dff.roi_response_series['test_ts'], series)
# Create one segmentation for the imaging plane and register every cell
# specimen as an ROI (legacy pynwb API: positional name / pixel list /
# image mask, plus an explicit 'source' argument).
plane_segmentation = image_segmentation_interface.create_plane_segmentation(
    name='plane_segmentation',
    source='NA',
    description='Segmentation for imaging plane',
    imaging_plane=imaging_plane)

for cell_specimen_id in cell_specimen_ids:
    # ROI name is the stringified cell specimen id; the pixel list is left
    # empty because only the image mask is supplied.
    curr_name = str(cell_specimen_id)
    curr_image_mask = dataset.get_roi_mask_array([cell_specimen_id])[0]
    plane_segmentation.add_roi(curr_name, [], curr_image_mask)

########################################
# 7) Next, we add a dF/F interface to the module. This allows us to write the dF/F timeseries data associated with
# each ROI.

dff_interface = DfOverF(name='dff_interface',
                        source='Flourescence data container')
ophys_module.add_data_interface(dff_interface)

# Table region selecting, by name, the ROIs the dF/F series refers to.
rt_region = plane_segmentation.create_roi_table_region(
    description='segmented cells with cell_specimen_ids',
    names=[str(x) for x in cell_specimen_ids],
)

# NOTE(review): 'dFF' and 'timestamps' are assumed to be defined earlier in
# this script — confirm their shapes match the ROI region.
dFF_series = dff_interface.create_roi_response_series(
    name='df_over_f',
    source='NA',
    data=dFF,
    unit='NA',
    rois=rt_region,
    timestamps=timestamps,
)
class Preprocessing:
    """Interactive helper that collects NWB metadata from a YAML file and
    from the user, then builds the corresponding pynwb containers step by
    step (device, optical channel, imaging plane, segmentation, dF/F, ...).

    NOTE(review): several methods evaluate raw user input with ``eval`` —
    convenient for typing datetimes, but unsafe on untrusted input.
    """

    def load_yaml(self):
        """Load metadata from data.yaml/data.yml into ``self.data`` (an
        empty dict when no file or an empty file is found), then write
        data.yaml back out."""
        # Open YAML file with metadata if existing then dump all data in a dict
        if os.path.isfile("data.yaml"):
            with open("data.yaml", 'r') as stream:
                self.data = yaml.safe_load(stream)
        # Same but with .yml extension
        elif os.path.isfile("data.yml"):
            with open("data.yml", 'r') as stream:
                self.data = yaml.safe_load(stream)
        else:
            self.data = dict()
        # safe_load returns None for an empty file
        if self.data is None:
            self.data = dict()
        # Dump the dict in the YAML file to save what the user inputted for future use
        with open('data.yaml', 'w') as outfile:
            yaml.dump(self.data,
                      outfile,
                      default_flow_style=False,
                      allow_unicode=True)

    def add_required_metadata(self, data, key, metadata_type):
        """Prompt the user for one required *key*, store it in *data* and
        persist ``self.data`` to data.yaml. Returns the updated *data*."""
        # Prompt user to give required metadata
        # Need to be wary of the type (put " " for string and datetime.datetime(%Y, %m, %d) for datetime)
        print("Missing required " + metadata_type + " metadata : " + key +
              "\n")
        metadata_value = input("Type the value (with respect of the type) : ")
        # NOTE(review): eval on raw user input — only safe for trusted users
        data[key] = eval(metadata_value)
        # Dump the dict in the YAML file to save what the user inputted for future use
        with open('data.yaml', 'w') as outfile:
            yaml.dump(self.data,
                      outfile,
                      default_flow_style=False,
                      allow_unicode=True)
        return data

    def add_optional_metadata(self, data):
        """Repeatedly prompt the user for arbitrary key/value metadata pairs
        until they answer 'no', persisting ``self.data`` before returning
        the updated *data*."""
        # Allow user to add as much metadata as he wants with the key he wants
        # Need to refer to documentation if he wants to fill existing attributes but he can create new ones and use
        # them in his own code
        keyboard_input = False
        while not keyboard_input:
            # Prompt user to give optional metadata
            # Need to be wary of the type (put " " for string and datetime.datetime(%Y, %m, %d) for datetime)
            metadata_key = input(
                "Type the name of the metadata you want to add ? ")
            # NOTE(review): the result of this replace/lower chain is
            # discarded, so the key is stored exactly as typed — confirm
            # whether normalization was intended
            metadata_key.replace(" ", "").lower().replace("", "_")
            metadata_value = input(
                "Type the value (with respect of the type) : ")
            # NOTE(review): eval on raw user input — only safe for trusted users
            data[metadata_key] = eval(metadata_value)
            go = input("Continue ? \n(yes/no)")
            # Prevent errors if other input than yes/no
            while go.replace(" ", "").lower() != "no" and go.replace(
                    " ", "").lower() != "yes":
                go = input("Continue ? (yes/no)")
            if go.replace(" ", "").lower() == "no":
                keyboard_input = True
        # Dump the dict in the YAML file to save what the user inputted for future use
        with open('data.yaml', 'w') as outfile:
            yaml.dump(self.data,
                      outfile,
                      default_flow_style=False,
                      allow_unicode=True)
        return data

    def ophys_metadata_acquisition(self):
        """Ensure all required ophys metadata is present (asking the user for
        missing keys), optionally collect more, then build ``self.nwbfile``.

        Requires ``self.subject`` (see subject_metadata_acquisition)."""
        # TODO: maybe use a dict of sub-dicts, one per pynwb class to fill.
        # Pros: more readable YAML, simple disambiguation of attribute names
        # while keeping the NWB notation.
        # Cons: harder for the user to enter (we would have to ask which
        # class each value belongs to).
        # List of the required metadata
        # TODO : maybe better implementation is possible ?
        required_metadata = [
            "session_description", "identifier", "session_start_time"
        ]
        if self.data.get('ophys_metadata') is None:
            self.data['ophys_metadata'] = dict()
            for i in required_metadata:
                self.data["ophys_metadata"] = self.add_required_metadata(
                    self.data["ophys_metadata"], i, "ophys")
        else:
            # Check if YAML file doesn't have all the required attributes and ask them the missing ones
            metadata_to_add = list(
                set(required_metadata) -
                set(list(self.data['ophys_metadata'].keys())))
            for i in metadata_to_add:
                self.data["ophys_metadata"] = self.add_required_metadata(
                    self.data["ophys_metadata"], i, "ophys")
            print("Found ophys metadata : " +
                  str(list(self.data['ophys_metadata'].keys())))
            add_metadata = input(
                "Do you want to add more ophys metadata ? (yes/no) ")
            # Prevent errors if other input than yes/no
            while add_metadata.replace(
                    " ", "").lower() != "no" and add_metadata.replace(
                        " ", "").lower() != "yes":
                add_metadata = input(
                    "Do you want to add more ophys metadata ? \n(yes/no) ")
            if add_metadata.replace(" ", "").lower() == "yes":
                self.data["ophys_metadata"] = self.add_optional_metadata(
                    self.data["ophys_metadata"])
        # Create new NWB file with all known attributes
        self.nwbfile = NWBFile(
            session_description=self.data['ophys_metadata'].get(
                "session_description"),
            identifier=self.data['ophys_metadata'].get("identifier"),
            session_start_time=self.data['ophys_metadata'].get(
                "session_start_time"),
            file_create_date=self.data['ophys_metadata'].get(
                "file_create_date"),
            timestamps_reference_time=self.data['ophys_metadata'].get(
                "timestamps_reference_time"),
            experimenter=self.data['ophys_metadata'].get("experimenter"),
            experiment_description=self.data['ophys_metadata'].get(
                "experiment_description"),
            session_id=self.data['ophys_metadata'].get("session_id"),
            institution=self.data['ophys_metadata'].get("institution"),
            keywords=self.data['ophys_metadata'].get("keywords"),
            notes=self.data['ophys_metadata'].get("notes"),
            pharmacology=self.data['ophys_metadata'].get("pharmacology"),
            protocol=self.data['ophys_metadata'].get("protocol"),
            related_publications=self.data['ophys_metadata'].get(
                "related_publications"),
            slices=self.data['ophys_metadata'].get("slices"),
            source_script=self.data['ophys_metadata'].get("source_script"),
            source_script_file_name=self.data['ophys_metadata'].get(
                "source_script_file_name"),
            # NOTE(review): this key looks like a broken search-and-replace —
            # it was probably meant to be "data_collection"; confirm
            data_collection=self.data['ophys_metadata'].get(
                "self.data['ophys_metadata']_collection"),
            surgery=self.data['ophys_metadata'].get("surgery"),
            virus=self.data['ophys_metadata'].get("virus"),
            stimulus_notes=self.data['ophys_metadata'].get("stimulus_notes"),
            lab=self.data['ophys_metadata'].get("lab"),
            subject=self.subject)

    def subject_metadata_acquisition(self):
        """Collect subject metadata (creating the dict when missing,
        optionally asking the user for more) and build ``self.subject``."""
        # Check if metadata about the subject exists and prompt the user if he wants to add some
        if self.data.get('subject_metadata') is None:
            print("No subject metadata found \n ")
            self.data['subject_metadata'] = dict()
        elif len(self.data['subject_metadata']) == 0:
            print("No subject \nmetadata found \n ")
        else:
            print("Found subject metadata : " +
                  str(list(self.data['subject_metadata'].keys())))
            add_metadata = input(
                "Do you want to add more subject metadata ? (yes/no) ")
            # Prevent errors if other input than yes/no
            while add_metadata.replace(
                    " ", "").lower() != "no" and add_metadata.replace(
                        " ", "").lower() != "yes":
                add_metadata = input(
                    "Do you want to add more subject metadata ? (yes/no) ")
            if add_metadata.replace(" ", "").lower() == "yes":
                self.data['subject_metadata'] = self.add_optional_metadata(
                    self.data['subject_metadata'])
        self.subject = Subject(
            age=self.data['subject_metadata'].get("age"),
            description=self.data['subject_metadata'].get("description"),
            genotype=self.data['subject_metadata'].get("genotype"),
            sex=self.data['subject_metadata'].get("sex"),
            species=self.data['subject_metadata'].get("species"),
            subject_id=self.data['subject_metadata'].get("subject_id"),
            weight=self.data['subject_metadata'].get("weight"),
            date_of_birth=self.data['subject_metadata'].get("date_of_birth"))

    def cicada_create_device(self):
        """ class pynwb.device.Device(name, parent=None) """
        required_metadata = ["device_name"]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Register the device on the NWB file so later containers can link it
        self.device = Device(
            name=self.data['ophys_metadata'].get("device_name"))
        self.nwbfile.add_device(self.device)

    def cicada_create_optical_channel(self):
        """Build ``self.optical_channel`` from the ophys metadata keys."""
        required_metadata = [
            "optical_channel_name", "optical_channel_description",
            "optical_channel_emission_lambda"
        ]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        self.optical_channel = OpticalChannel(
            name=self.data['ophys_metadata'].get("optical_channel_name"),
            description=self.data['ophys_metadata'].get(
                "optical_channel_description"),
            emission_lambda=self.data['ophys_metadata'].get(
                "optical_channel_emission_lambda"))

    def cicada_create_module(self):
        """Create the processing module that will hold every ophys
        interface built by the ``cicada_create_*`` methods."""
        required_metadata = [
            "processing_module_name", "processing_module_description"
        ]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        self.mod = self.nwbfile.create_processing_module(
            name=self.data['ophys_metadata'].get("processing_module_name"),
            description=self.data['ophys_metadata'].get(
                "processing_module_description"))

    def cicada_create_imaging_plane(self):
        """ class pynwb.ophys.ImagingPlane(name, optical_channel,
        description, device, excitation_lambda, imaging_rate, indicator,
        location, manifold=None, conversion=None, unit=None,
        reference_frame=None, parent=None) """
        required_metadata = [
            "imaging_plane_name", "imaging_plane_description",
            "imaging_plane_excitation_lambda", "imaging_plane_imaging_rate",
            "imaging_plane_indicator", "imaging_plane_location"
        ]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Prefix shared by all imaging-plane metadata keys
        name_module = "imaging_plane_"
        self.imaging_plane = self.nwbfile.create_imaging_plane(
            name=self.data['ophys_metadata'].get(name_module + "name"),
            optical_channel=self.optical_channel,
            description=self.data['ophys_metadata'].get(name_module +
                                                        "description"),
            device=self.device,
            excitation_lambda=self.data['ophys_metadata'].get(
                name_module + "excitation_lambda"),
            imaging_rate=self.data['ophys_metadata'].get(name_module +
                                                         "imaging_rate"),
            indicator=self.data['ophys_metadata'].get(name_module +
                                                      "indicator"),
            location=self.data['ophys_metadata'].get(name_module +
                                                     "location"),
            manifold=self.data['ophys_metadata'].get(name_module +
                                                     "manifold"),
            conversion=self.data['ophys_metadata'].get(name_module +
                                                       "conversion"),
            unit=self.data['ophys_metadata'].get(name_module + "unit"),
            reference_frame=self.data['ophys_metadata'].get(
                name_module + "reference_frame"))

    def cicada_create_two_photon_series(self,
                                        data_to_store=None,
                                        external_file=None):
        """ class pynwb.ophys.TwoPhotonSeries(name, imaging_plane,
        data=None, unit=None, format=None, field_of_view=None,
        pmt_gain=None, scan_line_rate=None, external_file=None,
        starting_frame=None, bits_per_pixel=None, dimension=[nan],
        resolution=0.0, conversion=1.0, timestamps=None,
        starting_time=None, rate=None, comments='no comments',
        description='no description', control=None,
        control_description=None, parent=None) """
        required_metadata = ["two_photon_name"]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Prefix shared by all two-photon metadata keys
        name_module = "two_photon_"
        self.movie_two_photon = TwoPhotonSeries(
            name=self.data['ophys_metadata'].get(name_module + "name"),
            imaging_plane=self.imaging_plane,
            data=data_to_store,
            unit=self.data['ophys_metadata'].get(name_module + "unit"),
            format=self.data['ophys_metadata'].get(name_module + "format"),
            field_of_view=self.data['ophys_metadata'].get(name_module +
                                                          "field_of_view"),
            pmt_gain=self.data['ophys_metadata'].get(name_module +
                                                     "pmt_gain"),
            scan_line_rate=self.data['ophys_metadata'].get(
                name_module + "scan_line_rate"),
            external_file=external_file,
            starting_frame=self.data['ophys_metadata'].get(name_module +
                                                           "starting_frame"),
            bits_per_pixel=self.data['ophys_metadata'].get(name_module +
                                                           "bits_per_pixel"),
            # NOTE(review): raises AttributeError when data_to_store is None
            dimension=data_to_store.shape[1:],
            resolution=0.0,
            conversion=1.0,
            timestamps=self.data['ophys_metadata'].get(name_module +
                                                       "timestamps"),
            starting_time=self.data['ophys_metadata'].get(name_module +
                                                          "starting_time"),
            rate=1.0,  # hard-coded acquisition rate
            comments="no comments",
            description="no description",
            control=self.data['ophys_metadata'].get(name_module + "control"),
            control_description=self.data['ophys_metadata'].get(
                name_module + "control_description"),
            parent=self.data['ophys_metadata'].get(name_module + "parent"))
        self.nwbfile.add_acquisition(self.movie_two_photon)

    def cicada_create_motion_correction(self):
        """ class pynwb.ophys.MotionCorrection(corrected_images_stacks={},
        name='MotionCorrection') """
        # Prefix of the metadata keys for this module (currently unused)
        name_module = "motion_correction_"
        corrected_images_stacks = {}
        self.motion_correction = MotionCorrection(
            corrected_images_stacks=corrected_images_stacks,
            name="MotionCorrection")
        self.mod.add_data_interface(self.motion_correction)

    def cicada_add_corrected_image_stack(self,
                                         corrected=None,
                                         original=None,
                                         xy_translation=None):
        """ class pynwb.ophys.CorrectedImageStack(corrected, original,
        xy_translation, name='CorrectedImageStack') """
        # Prefix of the metadata keys for this module (currently unused)
        name_module = "corrected_image_stack_"
        self.corrected_image_stack = CorrectedImageStack(
            corrected=corrected,
            original=original,
            xy_translation=xy_translation,
            name="CorrectedImageStack")
        self.motion_correction.add_corrected_image_stack(
            self.corrected_image_stack)

    def cicada_add_plane_segmentation(self):
        """ class pynwb.ophys.PlaneSegmentation(description, imaging_plane,
        name=None, reference_images=None, id=None, columns=None,
        colnames=None) """
        required_metadata = ["plane_segmentation_description"]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Prefix shared by all plane-segmentation metadata keys
        name_module = "plane_segmentation_"
        self.plane_segmentation = PlaneSegmentation(
            description=self.data['ophys_metadata'].get(name_module +
                                                        "description"),
            imaging_plane=self.imaging_plane,
            name=self.data['ophys_metadata'].get(name_module + "name"),
            # NOTE(review): key is singular ("reference_image") — confirm
            reference_images=self.data['ophys_metadata'].get(
                name_module + "reference_image"),
            id=self.data['ophys_metadata'].get(name_module + "id"),
            columns=self.data['ophys_metadata'].get(name_module + "columns"),
            colnames=self.data['ophys_metadata'].get(name_module +
                                                     "colnames"))
        self.image_segmentation.add_plane_segmentation(
            self.plane_segmentation)

    def cicada_add_roi_in_plane_segmentation(self,
                                             pixel_mask=None,
                                             voxel_mask=None,
                                             image_mask=None,
                                             id_roi=None):
        """add_roi(pixel_mask=None, voxel_mask=None, image_mask=None,
        id=None) """
        self.plane_segmentation.add_roi(pixel_mask=pixel_mask,
                                        voxel_mask=voxel_mask,
                                        image_mask=image_mask,
                                        id=id_roi)

    def cicada_create_roi_table_region_in_plane_segmentation(
            self, region=slice(None, None, None)):
        """create_roi_table_region(description, region=slice(None, None, None), name='rois')"""
        required_metadata = ["roi_table_region_description"]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Prefix shared by all roi-table-region metadata keys
        name_module = "roi_table_region_"
        self.table_region = self.plane_segmentation.create_roi_table_region(
            description=self.data['ophys_metadata'].get(name_module +
                                                        "description"),
            region=region,
            name="rois")

    def cicada_create_image_segmentation(self):
        """ class pynwb.ophys.ImageSegmentation(plane_segmentations={},
        name='ImageSegmentation') """
        # Prefix of the metadata keys for this module (currently unused)
        name_module = "image_segmentation_"
        plane_segmentations = {}
        self.image_segmentation = ImageSegmentation(
            plane_segmentations=plane_segmentations,
            name="ImageSegmentation")
        self.mod.add_data_interface(self.image_segmentation)

    def cicada_create_fluorescence(self):
        """ class pynwb.ophys.Fluorescence(roi_response_series={},
        name='Fluorescence') """
        # Prefix of the metadata keys for this module (currently unused)
        name_module = "fluorescence_"
        roi_response_series = {}
        self.fluorescence = Fluorescence(
            roi_response_series=roi_response_series, name="Fluorescence")
        self.mod.add_data_interface(self.fluorescence)

    def cicada_create_DfOverF(self):
        """ class pynwb.ophys.DfOverF(roi_response_series={},
        name='DfOverF') """
        # Prefix of the metadata keys for this module (currently unused)
        name_module = "DfOverF_"
        roi_response_series = {}
        self.DfOverF = DfOverF(roi_response_series=roi_response_series,
                               name="DfOverF")
        self.mod.add_data_interface(self.DfOverF)

    def cicada_add_roi_response_series(self,
                                       module,
                                       traces_data=None,
                                       rois=None):
        """ class pynwb.ophys.RoiResponseSeries(name, data, unit, rois,
        resolution=0.0, conversion=1.0, timestamps=None,
        starting_time=None, rate=None, comments='no comments',
        description='no description', control=None,
        control_description=None, parent=None) """
        required_metadata = [
            "roi_response_series_name", "roi_response_series_unit"
        ]
        metadata_to_add = list(
            set(required_metadata) -
            set(list(self.data['ophys_metadata'].keys())))
        for i in metadata_to_add:
            self.data["ophys_metadata"] = self.add_required_metadata(
                self.data["ophys_metadata"], i, "ophys")
        # Prefix shared by all roi-response-series metadata keys
        name_module = "roi_response_series_"
        # NOTE(review): the 'rois' parameter is ignored; the series always
        # uses self.table_region — confirm that is intended
        roi_response_series = RoiResponseSeries(
            name=self.data['ophys_metadata'].get(name_module + "name"),
            data=traces_data,
            unit=self.data['ophys_metadata'].get(name_module + "unit"),
            rois=self.table_region,
            resolution=0.0,
            conversion=1.0,
            # NOTE(review): key is "timestamp" (singular) — confirm
            timestamps=self.data['ophys_metadata'].get(name_module +
                                                       "timestamp"),
            starting_time=self.data['ophys_metadata'].get(name_module +
                                                          "starting_time"),
            rate=1.0,  # hard-coded sampling rate
            comments="no comments",
            description="no description",
            control=self.data['ophys_metadata'].get(name_module + "control"),
            control_description=self.data['ophys_metadata'].get(
                name_module + "control_description"),
            parent=self.data['ophys_metadata'].get(name_module + "parent"))
        if module == "DfOverF":
            self.DfOverF.add_roi_response_series(roi_response_series)
        elif module == "fluorescence":
            self.fluorescence.add_roi_response_series(roi_response_series)
        else:
            print(
                f"erreur : le nom du module doit être 'DfOverF' ou 'fluorescence', et non {module} !"
            )

    def find_roi(self):
        """Load the suite2p output arrays into ``self.suite2p_data`` and
        collect the pixel coordinates of every ROI classified as a cell.

        NOTE(review): the coord.append call below is commented out, so
        ``self.suite2p_data["coord"]`` ends up empty — confirm intent."""
        # Path of the suite2p folder
        data_path = "C:/Users/François/Documents/dossier François/Stage INMED/" \
                    "Programmes/Godly Ultimate Interface/NWB/exp2nwb-master/src/suite2p"
        self.suite2p_data = dict()
        # Open the fluorescence, neuropil, spikes, stat and is_cell files
        f = np.load(data_path + "/F.npy", allow_pickle=True)
        self.suite2p_data["F"] = f
        f_neu = np.load(data_path + "/Fneu.npy", allow_pickle=True)
        self.suite2p_data["Fneu"] = f_neu
        spks = np.load(data_path + "/spks.npy", allow_pickle=True)
        self.suite2p_data["spks"] = spks
        stat = np.load(data_path + "/stat.npy", allow_pickle=True)
        self.suite2p_data["stat"] = stat
        is_cell = np.load(data_path + "/iscell.npy", allow_pickle=True)
        self.suite2p_data["is_cell"] = is_cell
        # Find the coordinates of each ROI (cells here)
        coord = []
        for cell in np.arange(len(stat)):
            # Skip ROIs that suite2p did not classify as cells
            if is_cell[cell][0] == 0:
                continue
            print(is_cell[cell][0])
            list_points_coord = [
                (x, y, 1)
                for x, y in zip(stat[cell]["xpix"], stat[cell]["ypix"])
            ]
            # coord.append(np.array(list_points_coord).transpose())
            # The following would keep only the contour pixels (without the
            # interior ones)
            """
            # ATTENTION ! Il faut : from shapely.geometry import MultiPoint, LineString
            convex_hull = MultiPoint(list_points_coord).convex_hull
            if isinstance(convex_hull, LineString):
                coord_shapely = MultiPoint(list_points_coord).convex_hull.coords
            else:
                coord_shapely = MultiPoint(list_points_coord).convex_hull.exterior.coords
            coord.append(np.array(coord_shapely).transpose())
            """
        # Contains the list of pixels included in each ROI
        self.suite2p_data[
            "coord"] = coord
# Create one segmentation for the imaging plane and register every cell
# specimen as an ROI, identified directly by its cell_specimen_id.
plane_segmentation = image_segmentation_interface.create_plane_segmentation(
    name='plane_segmentation',
    description='Segmentation for imaging plane',
    imaging_plane=imaging_plane)

for cell_specimen_id in cell_specimen_ids:
    curr_name = cell_specimen_id
    curr_image_mask = dataset.get_roi_mask_array([cell_specimen_id])[0]
    plane_segmentation.add_roi(id=curr_name, image_mask=curr_image_mask)

########################################
# 7) Next, we add a dF/F interface to the module. This allows us to write the dF/F timeseries data associated with
# each ROI.

dff_interface = DfOverF(name='dff_interface')
ophys_module.add_data_interface(dff_interface)

# Region covering the ROI table (no explicit region argument given).
rt_region = plane_segmentation.create_roi_table_region(
    description='segmented cells with cell_specimen_ids',
)

# NOTE(review): 'dFF' and 'timestamps' are assumed to be defined earlier in
# this script — confirm dFF is shaped to match this ROI region.
dFF_series = dff_interface.create_roi_response_series(
    name='df_over_f',
    data=dFF,
    unit='NA',
    rois=rt_region,
    timestamps=timestamps,
)

########################################
# Now that we have created the data set, we can write the file to disk:
def conversion_function(source_paths,
                        f_nwb,
                        metadata,
                        add_raw=False,
                        add_processed=True,
                        add_behavior=True,
                        plot_rois=False):
    """
    Copy data stored in a set of .npz files to a single NWB file.

    Parameters
    ----------
    source_paths : dict
        Dictionary with paths to source files/directories. e.g.:
        {'raw_data': {'type': 'file', 'path': ''},
         'raw_info': {'type': 'file', 'path': ''}
         'processed_data': {'type': 'file', 'path': ''},
         'sparse_matrix': {'type': 'file', 'path': ''},
         'ref_image',: {'type': 'file', 'path': ''}}
    f_nwb : str
        Path to output NWB file, e.g. 'my_file.nwb'.
    metadata : dict
        Metadata dictionary
    add_raw : bool
        Whether to convert raw data or not.
    add_processed : bool
        Whether to convert processed data or not.
    add_behavior : bool
        Whether to convert behavior data or not.
    plot_rois : bool
        Plot ROIs
    """

    # Source files: load each provided path with the loader matching its key
    file_raw = None
    file_info = None
    file_processed = None
    file_sparse_matrix = None
    file_reference_image = None
    for k, v in source_paths.items():
        if source_paths[k]['path'] != '':
            fname = source_paths[k]['path']
            if k == 'raw_data':
                file_raw = h5py.File(fname, 'r')
            if k == 'raw_info':
                file_info = scipy.io.loadmat(fname,
                                             struct_as_record=False,
                                             squeeze_me=True)
            if k == 'processed_data':
                file_processed = np.load(fname)
            if k == 'sparse_matrix':
                file_sparse_matrix = np.load(fname)
            if k == 'ref_image':
                file_reference_image = np.load(fname)

    # Initialize a NWB object
    nwb = NWBFile(**metadata['NWBFile'])

    # Create and add device
    device = Device(name=metadata['Ophys']['Device'][0]['name'])
    nwb.add_device(device)

    # Creates one Imaging Plane for each channel
    # NOTE(review): assumes 'processed_data' was provided — file_processed is
    # None otherwise and this line raises
    fs = 1. / (file_processed['time'][0][1] - file_processed['time'][0][0])
    for meta_ip in metadata['Ophys']['ImagingPlane']:
        # Optical channel
        opt_ch = OpticalChannel(
            name=meta_ip['optical_channel'][0]['name'],
            description=meta_ip['optical_channel'][0]['description'],
            emission_lambda=meta_ip['optical_channel'][0]['emission_lambda'])
        nwb.create_imaging_plane(
            name=meta_ip['name'],
            optical_channel=opt_ch,
            description=meta_ip['description'],
            device=device,
            excitation_lambda=meta_ip['excitation_lambda'],
            imaging_rate=fs,
            indicator=meta_ip['indicator'],
            location=meta_ip['location'],
        )

    # Raw optical data
    if add_raw:
        print('Adding raw data...')
        for meta_tps in metadata['Ophys']['TwoPhotonSeries']:
            # Channel is selected from the series-name suffix ('R' or not)
            if meta_tps['name'][-1] == 'R':
                raw_data = file_raw['R']
            else:
                raw_data = file_raw['Y']

            def data_gen(data):
                # Yield one time-chunk at a time so the whole 4-D stack is
                # never loaded into memory at once
                xl, yl, zl, tl = data.shape
                chunk = 0
                while chunk < tl:
                    val = data[:, :, :, chunk]
                    chunk += 1
                    print('adding data chunk: ', chunk)
                    yield val

            xl, yl, zl, tl = raw_data.shape
            tps_data = DataChunkIterator(data=data_gen(data=raw_data),
                                         iter_axis=0,
                                         maxshape=(tl, xl, yl, zl))

            # Change dimensions from (X,Y,Z,T) in mat file to (T,X,Y,Z) nwb standard
            #raw_data = np.moveaxis(raw_data, -1, 0)

            tps = TwoPhotonSeries(
                name=meta_tps['name'],
                imaging_plane=nwb.imaging_planes[meta_tps['imaging_plane']],
                data=tps_data,
                rate=file_info['info'].daq.scanRate)
            nwb.add_acquisition(tps)

    # Processed data
    if add_processed:
        print('Adding processed data...')
        ophys_module = ProcessingModule(
            name='Ophys',
            description='contains optical physiology processed data.',
        )
        nwb.add_processing_module(ophys_module)

        # Create Image Segmentation compartment
        img_seg = ImageSegmentation(
            name=metadata['Ophys']['ImageSegmentation']['name'])
        ophys_module.add(img_seg)

        # Create plane segmentation and add ROIs
        meta_ps = metadata['Ophys']['ImageSegmentation'][
            'plane_segmentations'][0]
        ps = img_seg.create_plane_segmentation(
            name=meta_ps['name'],
            description=meta_ps['description'],
            imaging_plane=nwb.imaging_planes[meta_ps['imaging_plane']],
        )

        # Add ROIs — one per consecutive indptr pair of the sparse matrix
        indices = file_sparse_matrix['indices']
        indptr = file_sparse_matrix['indptr']
        dims = np.squeeze(file_processed['dims'])
        for start, stop in zip(indptr, indptr[1:]):
            voxel_mask = make_voxel_mask(indices[start:stop], dims)
            ps.add_roi(voxel_mask=voxel_mask)

        # Visualize 3D voxel masks
        if plot_rois:
            plot_rois_function(plane_segmentation=ps, indptr=indptr)

        # DFF measures
        dff = DfOverF(name=metadata['Ophys']['DfOverF']['name'])
        ophys_module.add(dff)

        # create ROI regions
        n_cells = file_processed['dFF'].shape[0]
        roi_region = ps.create_roi_table_region(
            description='RoiTableRegion', region=list(range(n_cells)))

        # create ROI response series (data transposed to timepoints x rois)
        dff_data = file_processed['dFF']
        tt = file_processed['time'].ravel()
        meta_rrs = metadata['Ophys']['DfOverF']['roi_response_series'][0]
        meta_rrs['data'] = dff_data.T
        meta_rrs['rois'] = roi_region
        meta_rrs['timestamps'] = tt
        dff.create_roi_response_series(**meta_rrs)

        # Creates GrayscaleVolume containers and add a reference image
        grayscale_volume = GrayscaleVolume(
            name=metadata['Ophys']['GrayscaleVolume']['name'],
            data=file_reference_image['im'])
        ophys_module.add(grayscale_volume)

    # Behavior data
    if add_behavior:
        print('Adding behavior data...')
        # Ball motion
        behavior_mod = nwb.create_processing_module(
            name='Behavior',
            description='holds processed behavior data',
        )
        meta_ts = metadata['Behavior']['TimeSeries'][0]
        meta_ts['data'] = file_processed['ball'].ravel()
        tt = file_processed['time'].ravel()
        meta_ts['timestamps'] = tt
        behavior_ts = TimeSeries(**meta_ts)
        behavior_mod.add(behavior_ts)

        # Re-arranges spatial data of body-points positions tracking
        pos = file_processed['dlc']
        n_points = 8
        pos_reshaped = pos.reshape(
            (-1, n_points, 3))  # dims=(nSamples,n_points,3)

        # Creates a Position object and add one SpatialSeries for each body-point position
        position = Position()
        for i in range(n_points):
            position.create_spatial_series(
                name='SpatialSeries_' + str(i),
                data=pos_reshaped[:, i, :],
                timestamps=tt,
                reference_frame=
                'Description defining what the zero-position is.',
                conversion=np.nan)
        behavior_mod.add(position)

        # Trial times: boundaries wherever trialFlag changes value
        trialFlag = file_processed['trialFlag'].ravel()
        trial_inds = np.hstack(
            (0, np.where(np.diff(trialFlag))[0], trialFlag.shape[0] - 1))
        trial_times = tt[trial_inds]
        for start, stop in zip(trial_times, trial_times[1:]):
            nwb.add_trial(start_time=start, stop_time=stop)

    # Saves to NWB file
    with NWBHDF5IO(f_nwb, mode='w') as io:
        io.write(nwb)
    print('NWB file saved with size: ',
          os.stat(f_nwb).st_size / 1e6, ' mb')
def setUpClass(self):
    """Build the shared ophys fixture: imaging plane, two-photon series,
    a 4-ROI plane segmentation with two custom columns, a Fluorescence
    response series over those ROIs, and finally the DfOverF under test.

    NOTE(review): unusual signature — setUpClass is normally a classmethod
    taking ``cls``; confirm how the test framework invokes this.
    """
    device = Device("imaging_device_1")
    optical_channel = OpticalChannel("my_optchan", "description", 500.0)
    self.imaging_plane = ImagingPlane(
        name="imgpln1",
        optical_channel=optical_channel,
        description="a fake ImagingPlane",
        device=device,
        excitation_lambda=600.0,
        imaging_rate=300.0,
        indicator="GFP",
        location="somewhere in the brain",
        reference_frame="unknown",
        origin_coords=[10, 20],
        origin_coords_unit="millimeters",
        grid_spacing=[0.001, 0.001],
        grid_spacing_unit="millimeters",
    )

    self.image_series = TwoPhotonSeries(
        name="test_image_series",
        data=np.random.randn(100, 5, 5),
        imaging_plane=self.imaging_plane,
        starting_frame=[0],
        rate=1.0,
        unit="n.a",
    )
    self.img_seg = ImageSegmentation()
    self.ps2 = self.img_seg.create_plane_segmentation(
        "output from segmenting my favorite imaging plane",
        self.imaging_plane,
        "2d_plane_seg",
        self.image_series,
    )
    self.ps2.add_column("type", "desc")
    self.ps2.add_column("type2", "desc")

    # Four ROIs as 3x3 image masks, tagged with (type, type2) combinations
    w, h = 3, 3
    img_mask1 = np.zeros((w, h))
    img_mask1[0, 0] = 1.1
    img_mask1[1, 1] = 1.2
    img_mask1[2, 2] = 1.3
    self.ps2.add_roi(image_mask=img_mask1, type=1, type2=0)

    img_mask2 = np.zeros((w, h))
    img_mask2[0, 0] = 2.1
    img_mask2[1, 1] = 2.2
    self.ps2.add_roi(image_mask=img_mask2, type=1, type2=1)

    img_mask2 = np.zeros((w, h))
    img_mask2[0, 0] = 9.1
    img_mask2[1, 1] = 10.2
    self.ps2.add_roi(image_mask=img_mask2, type=2, type2=0)

    img_mask2 = np.zeros((w, h))
    img_mask2[0, 0] = 3.5
    img_mask2[1, 1] = 5.6
    self.ps2.add_roi(image_mask=img_mask2, type=2, type2=1)

    fl = Fluorescence()
    rt_region = self.ps2.create_roi_table_region("the first of two ROIs",
                                                 region=[0, 1, 2, 3])

    # 10 timepoints x 5 columns, filled column-major
    # NOTE(review): rois_shape is 5 while the region selects 4 ROIs — confirm
    rois_shape = 5
    data = np.arange(10 * rois_shape).reshape([10, -1], order='F')
    timestamps = np.array(
        [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    rrs = fl.create_roi_response_series(name="my_rrs",
                                        data=data,
                                        rois=rt_region,
                                        unit="lumens",
                                        timestamps=timestamps)
    self.df_over_f = DfOverF(rrs)
def setUp(self):
    """Assemble a full synthetic NWB session ending in the DfOverF under
    test: device -> imaging plane -> two-photon series -> segmentation with
    two ROIs -> fluorescence series restricted to the first ROI."""
    nwbfile = NWBFile(
        'my first synthetic recording',
        'EXAMPLE_ID',
        datetime.now(tzlocal()),
        experimenter='Dr. Bilbo Baggins',
        lab='Bag End Laboratory',
        institution='University of Middle Earth at the Shire',
        experiment_description=('I went on an adventure with thirteen '
                                'dwarves to reclaim vast treasures.'),
        session_id='LONELYMTN')

    device = Device('imaging_device_1')
    nwbfile.add_device(device)
    optical_channel = OpticalChannel('my_optchan', 'description', 500.)
    # Positional arguments follow the legacy create_imaging_plane signature
    imaging_plane = nwbfile.create_imaging_plane(
        'my_imgpln', optical_channel, 'a very interesting part of the brain',
        device, 600., 300., 'GFP', 'my favorite brain location',
        np.ones((5, 5, 3)), 4.0, 'manifold unit', 'A frame to refer to')

    self.image_series = TwoPhotonSeries(name='test_iS',
                                        dimension=[2],
                                        data=np.random.rand(10, 5, 5, 3),
                                        external_file=['images.tiff'],
                                        imaging_plane=imaging_plane,
                                        starting_frame=[0],
                                        format='tiff',
                                        starting_time=0.0,
                                        rate=1.0)
    nwbfile.add_acquisition(self.image_series)

    mod = nwbfile.create_processing_module(
        'ophys', 'contains optical physiology processed data')
    img_seg = ImageSegmentation()
    mod.add(img_seg)
    ps = img_seg.create_plane_segmentation(
        'output from segmenting my favorite imaging plane', imaging_plane,
        'my_planeseg', self.image_series)

    # Two ROIs, each described by pixel, image and voxel masks
    w, h = 3, 3
    pix_mask1 = [(0, 0, 1.1), (1, 1, 1.2), (2, 2, 1.3)]
    vox_mask1 = [(0, 0, 0, 1.1), (1, 1, 1, 1.2), (2, 2, 2, 1.3)]
    img_mask1 = [[0.0 for x in range(w)] for y in range(h)]
    img_mask1[0][0] = 1.1
    img_mask1[1][1] = 1.2
    img_mask1[2][2] = 1.3
    ps.add_roi(pixel_mask=pix_mask1,
               image_mask=img_mask1,
               voxel_mask=vox_mask1)

    pix_mask2 = [(0, 0, 2.1), (1, 1, 2.2)]
    vox_mask2 = [(0, 0, 0, 2.1), (1, 1, 1, 2.2)]
    img_mask2 = [[0.0 for x in range(w)] for y in range(h)]
    img_mask2[0][0] = 2.1
    img_mask2[1][1] = 2.2
    ps.add_roi(pixel_mask=pix_mask2,
               image_mask=img_mask2,
               voxel_mask=vox_mask2)

    # Fluorescence series over the first ROI only: 10 timepoints x 1 ROI
    fl = Fluorescence()
    mod.add(fl)
    rt_region = ps.create_roi_table_region('the first of two ROIs',
                                           region=[0])
    data = np.array([0., 1., 2., 3., 4., 5., 6., 7., 8.,
                     9.]).reshape(10, 1)
    timestamps = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    rrs = fl.create_roi_response_series('my_rrs',
                                        data,
                                        rt_region,
                                        unit='lumens',
                                        timestamps=timestamps)
    self.df_over_f = DfOverF(rrs)