def copy_nwb_file(src, dest):
    """ "Copy" .nwb file by opening and saving into a new path.

    The new file (`dest`) should then have a new `object_id` attribute, and
    thus be considered "different" although containing the same data.

    Parameters
    ----------
    src: str
        Source file
    dest: str
        Destination file or directory. If it points to an existing directory,
        a file with the same name is created (exception if it already exists).
        If not an existing directory, the target directory is created.

    Returns
    -------
    dest
    """
    if op.isdir(dest):
        dest = op.join(dest, op.basename(src))
    else:
        os.makedirs(op.dirname(dest), exist_ok=True)
    # The simplest way yoh could find
    with pynwb.NWBHDF5IO(src, "r") as ior, \
            pynwb.NWBHDF5IO(dest, "w", manager=ior.manager) as iow:
        iow.write(ior.read().copy(), link_data=False)
    return dest
def copy_nwb_file(src: Union[str, Path], dest: Union[str, Path]) -> str:
    """ "Copy" .nwb file by opening and saving into a new path.

    The new file (`dest`) should then have a new `object_id` attribute, and
    thus be considered "different" although containing the same data.

    Parameters
    ----------
    src: str
        Source file
    dest: str
        Destination file or directory. If it points to an existing directory,
        a file with the same name is created (exception if it already exists).
        If not an existing directory, the target directory is created.

    Returns
    -------
    dest
    """
    if op.isdir(dest):
        dest = op.join(dest, op.basename(src))
    else:
        os.makedirs(op.dirname(dest), exist_ok=True)
    with pynwb.NWBHDF5IO(src, "r") as ior, pynwb.NWBHDF5IO(dest, "w") as iow:
        data = ior.read()
        data.generate_new_id()
        iow.export(ior, nwbfile=data)
    return str(dest)
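# A minimal usage sketch for copy_nwb_file above (the file names are
# illustrative, not from the original source). The copy should read back
# with the same contents but a fresh object_id.
copied_path = copy_nwb_file("session.nwb", "backup_dir")
with pynwb.NWBHDF5IO("session.nwb", "r") as src_io, \
        pynwb.NWBHDF5IO(copied_path, "r") as dst_io:
    src_nwb, dst_nwb = src_io.read(), dst_io.read()
    assert src_nwb.object_id != dst_nwb.object_id
    assert src_nwb.session_description == dst_nwb.session_description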
def test_roundtrip(self):
    legacy_map = pynwb.legacy.get_type_map()
    io2 = pynwb.NWBHDF5IO(self.src_filename, extensions=legacy_map, mode='r')
    read_data = io2.read()
    io = pynwb.NWBHDF5IO(self.filename, mode='w')
    io.write(read_data)
    io.close()
    io2.close()
def copy_nwb_link_raw_ephys(nwb_file_name, out_nwb_file_name):
    print(f'Creating a copy of NWB file {nwb_file_name} '
          f'with link to raw ephys data: {out_nwb_file_name}')

    nwb_file_abs_path = Nwbfile.get_abs_path(nwb_file_name)
    assert os.path.exists(nwb_file_abs_path), \
        f'File does not exist: {nwb_file_abs_path}'

    out_nwb_file_abs_path = Nwbfile.get_abs_path(out_nwb_file_name)
    if os.path.exists(out_nwb_file_abs_path):
        warnings.warn(
            f'Output file {out_nwb_file_abs_path} exists and will be '
            'overwritten.')

    with pynwb.NWBHDF5IO(path=nwb_file_abs_path, mode='r',
                         load_namespaces=True) as input_io:
        nwbf = input_io.read()

        # pop off acquisition electricalseries
        eseries_list = get_raw_eseries(nwbf)
        for eseries in eseries_list:
            nwbf.acquisition.pop(eseries.name)

        # export the new NWB file
        with pynwb.NWBHDF5IO(path=out_nwb_file_abs_path, mode='w',
                             manager=input_io.manager) as export_io:
            export_io.export(input_io, nwbf)

    # add link from new file back to raw ephys data in raw data file using
    # fresh build manager and container cache where the acquisition
    # electricalseries objects have not been removed
    with pynwb.NWBHDF5IO(path=nwb_file_abs_path, mode='r',
                         load_namespaces=True) as input_io:
        nwbf_raw = input_io.read()
        eseries_list = get_raw_eseries(nwbf_raw)

        with pynwb.NWBHDF5IO(path=out_nwb_file_abs_path, mode='a',
                             manager=input_io.manager) as export_io:
            nwbf_export = export_io.read()

            # add link to raw ephys ElectricalSeries in raw data file
            for eseries in eseries_list:
                nwbf_export.add_acquisition(eseries)
            # workaround until the above sets modified=True on the file
            nwbf_export.set_modified()
            export_io.write(nwbf_export)

    return out_nwb_file_abs_path
def test_nwb_copy_file():
    # Create a nwb file to be copied
    path_here = os.path.dirname(os.path.abspath(__file__))
    path_old_file = os.path.join(path_here, 'old_file.nwb')
    nwbfile_old = create_random_nwbfile()
    with pynwb.NWBHDF5IO(path_old_file, 'w') as io:
        io.write(nwbfile_old)

    path_new_file = os.path.join(path_here, 'new_file.nwb')
    cp_objs = {
        'institution': True,
        'lab': True,
        'session': True,
        'devices': True,
        'electrode_groups': True,
        'electrodes': True,
        'epochs': True,
        'trials': True,
        'subject': True,
        'acquisition': ['raw_data', 'mic'],
        'stimulus': ['stim1', 'stim2'],
        'ecephys': ['LFP', 'high_gamma']
    }
    nwb_copy_file(
        old_file=path_old_file,
        new_file=path_new_file,
        cp_objs=cp_objs,
        save_to_file=True,
    )

    with pynwb.NWBHDF5IO(path_new_file, 'r') as io:
        nwbfile_new = io.read()
        assert nwbfile_new.institution == nwbfile_old.institution
        assert nwbfile_new.session_start_time == nwbfile_old.session_start_time
        assert nwbfile_new.devices is not None
        assert_array_equal(nwbfile_new.electrodes.columns[0][:],
                           nwbfile_old.electrodes.columns[0][:])
        assert nwbfile_new.electrode_groups.keys() == \
            nwbfile_old.electrode_groups.keys()
        for (v1, v2) in zip(nwbfile_new.stimulus.values(),
                            nwbfile_old.stimulus.values()):
            assert_array_equal(v1.data, v2.data)
        for k in nwbfile_new.acquisition.keys():
            assert_array_equal(nwbfile_new.acquisition[k].data,
                               nwbfile_old.acquisition[k].data)

    # Remove test nwb files
    os.remove(path_old_file)
    os.remove(path_new_file)
def f(nwbfile, data_object_cls, **data_object_cls_kwargs):
    tmp_dir = tmp_path / "data_object_nwb_roundtrip_tests"
    tmp_dir.mkdir()
    nwb_path = tmp_dir / "data_object_roundtrip_nwbfile.nwb"

    with pynwb.NWBHDF5IO(str(nwb_path), 'w') as write_io:
        write_io.write(nwbfile)

    with pynwb.NWBHDF5IO(str(nwb_path), 'r') as read_io:
        roundtripped_nwbfile = read_io.read()
        data_object_instance = data_object_cls.from_nwb(
            roundtripped_nwbfile, **data_object_cls_kwargs)

    return data_object_instance
def test_roundtrip(tmpdir_factory, nwbfile):
    tmpdir = str(tmpdir_factory.mktemp("test_serialize"))
    first_path = os.path.join(tmpdir, "first.nwb")
    second_path = os.path.join(tmpdir, "second.nwb")

    with pynwb.NWBHDF5IO(first_path, "w") as writer:
        writer.write(nwbfile)

    sink = nwb2_sink.Nwb2Sink(first_path)
    sink.register("institution", "AIBS")
    sink.serialize({"output_path": second_path})

    with pynwb.NWBHDF5IO(second_path, "r", load_namespaces=True) as reader:
        obt = reader.read()
        assert obt.institution == "AIBS"
def load_single_datafile(datafile):
    io = pynwb.NWBHDF5IO(datafile, 'r')
    nwbfile = io.read()
    t = nwbfile.acquisition['image_timeseries'].timestamps[:]
    x = nwbfile.acquisition['image_timeseries'].data[:, :, :]
    io.close()
    return t, x
def loadTraces(path, verbose=False):
    try:
        # Note: NWB HDF5 traces are lazily read, so this IO must remain
        # open until we actively read all trace data out of the file.
        io = pynwb.NWBHDF5IO(path, 'r+')
        nwbFile = io.read()
        traces = {}
        for key, value in nwbFile.acquisition.items():
            if key.startswith(TRACE_PREFIX):
                nodeID = key[len(TRACE_PREFIX):]
                loadedData = value.data[:]  # This is where data is loaded...
                traces[nodeID] = pynwb.base.TimeSeries(
                    name=key,
                    data=loadedData,
                    unit=value.unit,
                    rate=value.rate,
                )
                if verbose:
                    print("%s: %s values @ %dhz" % (
                        nodeID, traces[nodeID].data.shape,
                        int(traces[nodeID].rate)))
        return traces
    except Exception as e:
        print("Error reading file!")
        print(e)
        return None
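# A small sketch of the lazy-read behavior noted in loadTraces above
# (the file name and series name here are hypothetical). Dataset contents
# are only fetched from disk when sliced, so slicing must happen before
# the IO object is closed.
io = pynwb.NWBHDF5IO("traces.nwb", "r")
nwbfile = io.read()
series = nwbfile.acquisition["trace_node0"]
in_memory = series.data[:]  # materialize while the file is still open
io.close()
# after close, `in_memory` remains valid, but `series.data[:]` would fail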
def insert_from_nwb(self, nwb_file_name):
    try:
        io = pynwb.NWBHDF5IO(nwb_file_name, mode='r')
        nwbf = io.read()
    except Exception:
        print('Error: nwbfile {} cannot be opened for reading\n'.format(
            nwb_file_name))
        return

    # get the subject information and create a dictionary from it
    sub = nwbf.subject
    subject_dict = dict()
    subject_dict['subject_id'] = sub.subject_id
    subject_dict['age'] = 'unknown' if sub.age is None else sub.age
    subject_dict['description'] = sub.description
    subject_dict['genotype'] = sub.genotype
    if sub.sex == 'Male':
        sex = 'M'
    elif sub.sex == 'Female':
        sex = 'F'
    else:
        sex = 'U'
    subject_dict['sex'] = sex
    subject_dict['species'] = sub.species
    self.insert1(subject_dict, skip_duplicates=True)
    io.close()
def validate_nwbs():
    global TOTAL, FAILURES, ERRORS
    logging.info('running validation tests on NWB files')
    examples_nwbs = glob.glob('*.nwb')

    import pynwb

    TOTAL += len(examples_nwbs)
    for nwb in examples_nwbs:
        try:
            logging.info("Validating file %s" % nwb)

            ws = list()
            with warnings.catch_warnings(record=True) as tmp:
                with pynwb.NWBHDF5IO(nwb, mode='r') as io:
                    errors = pynwb.validate(io)
                    if errors:
                        FAILURES += 1
                        ERRORS += 1
                        for err in errors:
                            print("Error: %s" % err)
                for w in tmp:  # ignore RuntimeWarnings about importing
                    if isinstance(w.message, RuntimeWarning) and \
                            not warning_re.match(str(w.message)):
                        ws.append(w)
            for w in ws:
                warnings.showwarning(w.message, w.category, w.filename,
                                     w.lineno, w.line)
        except Exception:
            print(traceback.format_exc())
            FAILURES += 1
            ERRORS += 1
def make(self, key):
    self.insert1(key)
    try:
        io = pynwb.NWBHDF5IO(key['nwb_file_name'], mode='r')
        nwbf = io.read()
    except Exception:
        print('Error in Experimenter: nwbfile {} cannot be opened for '
              'reading\n'.format(key['nwb_file_name']))
        return

    for e in nwbf.experimenter:
        # check to see if the experimenter is in the lab member list,
        # and if not add her / him
        if {'lab_member_name': e} not in common_lab.LabMember():
            names = [x.strip() for x in e.split(' ')]
            labmember_dict = dict()
            labmember_dict['lab_member_name'] = e
            if len(names) == 2:
                labmember_dict['first_name'] = names[0]
                labmember_dict['last_name'] = names[1]
            else:
                print('Warning: experimenter {} does not seem to have a '
                      'first and last name'.format(e))
                labmember_dict['first_name'] = 'unknown'
                labmember_dict['last_name'] = 'unknown'
            common_lab.LabMember.insert1(labmember_dict)
        # now insert the experimenter, which is a combination of the
        # nwbfile and the name
        key['lab_member_name'] = e
        ExperimenterList.Experimenter.insert1(key)
    io.close()
def make(self, key):
    try:
        io = pynwb.NWBHDF5IO(key['nwb_file_name'], mode='r')
        nwbf = io.read()
    except Exception:
        print('Error in Session: nwbfile {} cannot be opened for '
              'reading\n'.format(key['nwb_file_name']))
        return

    # populate the Session with information from the file
    key['subject_id'] = nwbf.subject.subject_id
    key['institution_name'] = nwbf.institution
    key['lab_name'] = nwbf.lab
    # Remove when bug fixed: session_id can be empty in current version
    key['session_id'] = nwbf.session_id
    if key['session_id'] is None:
        key['session_id'] = 'tmp_id'
    key['session_description'] = nwbf.session_description
    key['session_start_time'] = nwbf.session_start_time
    key['experiment_description'] = nwbf.experiment_description
    key['timestamps_reference_time'] = nwbf.timestamps_reference_time
    self.insert1(key)
    # insert the devices
    ''' Uncomment when devices correct
    devices = list(nwbf.devices.keys())
    for d in devices:
        Session.DataAcqDevice.insert1(
            dict(nwb_file_name=key['nwb_file_name'], device_name=d),
            skip_duplicates=True)
    '''
    io.close()
def test_read_nwb_probe_successfully(self):
    shanks_electrode = ShanksElectrode(name='electrode_shank', rel_x=1.0,
                                       rel_y=2.0, rel_z=3.0)
    shank = Shank(name='shank')
    shank.add_shanks_electrode(shanks_electrode)
    probe = Probe(name='probe', units='mm', id=1, probe_type='type_1',
                  probe_description='2', contact_size=1.0,
                  contact_side_numbering=False)
    probe.add_shank(shank)
    self.nwb_file_content.add_device(probe)

    nwb_file_handler = NWBHDF5IO('probe.nwb', mode='w')
    nwb_file_handler.write(self.nwb_file_content)
    nwb_file_handler.close()

    self.assertTrue(os.path.exists('probe.nwb'))
    with pynwb.NWBHDF5IO('probe.nwb', 'r',
                         load_namespaces=True) as nwb_file_handler:
        nwb_file = nwb_file_handler.read()
        self.assertContainerEqual(nwb_file.devices['probe'], probe)

    self.delete_nwb('probe')
def test_read_nwb_nwb_electrode_group_successfully(self):
    device = Device('device_0')
    self.nwb_file_content.add_device(device)
    nwb_electrode_group = NwbElectrodeGroup(
        name='nwb_electrode_group_0',
        description='Sample description',
        location='Sample location',
        device=device,
        targeted_location='predicted location',
        targeted_x=1.0,
        targeted_y=2.0,
        targeted_z=3.0,
        units='um')
    self.nwb_file_content.add_electrode_group(nwb_electrode_group)

    nwb_file_handler = NWBHDF5IO('nwb_electrode_group.nwb', mode='w')
    nwb_file_handler.write(self.nwb_file_content)
    nwb_file_handler.close()

    self.assertTrue(os.path.exists('nwb_electrode_group.nwb'))
    with pynwb.NWBHDF5IO('nwb_electrode_group.nwb',
                         'r') as nwb_file_handler:
        nwb_file = nwb_file_handler.read()
        self.assertEqual(
            nwb_file.electrode_groups['nwb_electrode_group_0'].name,
            nwb_electrode_group.name)
        self.assertEqual(
            nwb_file.electrode_groups['nwb_electrode_group_0']
            .targeted_location,
            nwb_electrode_group.targeted_location)

    self.delete_nwb('nwb_electrode_group')
def refresh_file(self):
    """Re-opens the current file, for when new data is included"""
    if hasattr(self, 'io'):
        self.io.close()  # closes current NWB file
    self.io = pynwb.NWBHDF5IO(str(self.source_path), 'r+',
                              load_namespaces=True)
    self.nwb = self.io.read()  # reads NWB file

    # Searches for signal source on file
    try:  # Tries to load Raw data
        lis = list(self.nwb.acquisition.keys())
        for i in lis:  # Check if there is ElectricalSeries in acquisition group
            if type(self.nwb.acquisition[i]).__name__ == 'ElectricalSeries':
                self.source = self.nwb.acquisition[i]
                self.parent.combo3.setCurrentIndex(
                    self.parent.combo3.findText('raw'))
    except Exception:
        pass
    try:  # Tries to load preprocessed data
        self.source = self.nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        self.parent.combo3.setCurrentIndex(
            self.parent.combo3.findText('preprocessed'))
    except Exception:
        pass
    try:  # Tries to load High Gamma data
        self.source = self.nwb.processing['ecephys'].data_interfaces[
            'high_gamma']
        self.parent.combo3.setCurrentIndex(
            self.parent.combo3.findText('high gamma'))
    except Exception:
        pass

    self.plotData = self.source.data
    self.fs_signal = self.source.rate      # sampling frequency [Hz]
    self.tbin_signal = 1 / self.fs_signal  # time bin duration [seconds]
    self.nBins = self.source.data.shape[0]  # total number of bins
    self.load_stimuli()  # load stimuli signals (audio)
    self.updateCurXAxisPosition()
def load_max_projection_nwb(sess_files):
    """
    load_max_projection_nwb(sess_files)

    Returns maximum projection image of downsampled z-stack as an array,
    from NWB files.

    Required args:
        - sess_files (Path): full path names of the session files

    Returns:
        - max_proj (2D array): maximum projection image across downsampled
                               z-stack (height x width), with pixel intensity
                               in 0 (incl) to 256 (excl) range
                               ("uint8" datatype).
    """
    ophys_file = sess_file_util.select_nwb_sess_path(sess_files, ophys=True)

    with pynwb.NWBHDF5IO(str(ophys_file), "r") as f:
        nwbfile_in = f.read()
        ophys_module = nwbfile_in.get_processing_module("ophys")
        main_field = "PlaneImages"
        data_field = "max_projection"
        try:
            max_proj = ophys_module.get_data_interface(
                main_field).get_image(data_field)[()].astype("uint8")
        except KeyError as err:
            raise KeyError(
                "Could not find a maximum projection plane image "
                f"for {ophys_file} due to: {err}"
            )

    return max_proj
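# Hypothetical call to load_max_projection_nwb above; the session file
# path is made up, and sess_file_util would normally supply it.
from pathlib import Path

max_proj = load_max_projection_nwb([Path("ophys_session.nwb")])
print(max_proj.shape, max_proj.dtype)  # e.g. (512, 512) uint8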
def make(self, key):
    # get the NWB file name from this session
    nwb_file_name = key['nwb_file_name']
    try:
        io = pynwb.NWBHDF5IO(nwb_file_name, mode='r')
        nwbf = io.read()
    except Exception:
        print('Error: nwbfile {} cannot be opened for reading\n'.format(
            nwb_file_name))
        return

    interval_list_dict = dict()
    interval_list_dict['nwb_file_name'] = nwb_file_name
    units = nwbf.units.to_dataframe()
    for unum in range(len(units)):
        # for each unit we first need to add an interval list for this unit
        interval_list_dict['interval_name'] = \
            'unit {} interval list'.format(unum)
        interval_list_dict['valid_times'] = np.asarray(
            units.iloc[unum]['obs_intervals'])
        common_interval.IntervalList.insert1(interval_list_dict,
                                             skip_duplicates=True)
        try:
            key['unit_id'] = units.iloc[unum]['id']
        except Exception:
            key['unit_id'] = unum

        egroup = units.iloc[unum]['electrode_group']
        key['electrode_group_name'] = egroup.name
        key['interval_name'] = interval_list_dict['interval_name']
        key['cluster_name'] = units.iloc[unum]['cluster_name']
        # key['spike_times'] = np.asarray(units.iloc[unum]['spike_times'])
        key['nwb_object_id'] = -1  # FIX
        self.insert1(key)
def test_pynwb_io(simple1_nwb):
    # To verify that our dependencies spec is sufficient to avoid
    # stepping into known pynwb/hdmf issues
    with pynwb.NWBHDF5IO(str(simple1_nwb), "r", load_namespaces=True) as reader:
        nwbfile = reader.read()
        assert repr(nwbfile)
        assert str(nwbfile)
def test_build_and_read_nwb(self):
    metadata = MetadataManager(
        str(path) +
        '/rec_to_nwb/rec_to_nwb/test/processing/res/metadata.yml',
        [
            str(path) +
            '/rec_to_nwb/rec_to_nwb/test/processing/res/probe1.yml',
            str(path) +
            '/rec_to_nwb/rec_to_nwb/test/processing/res/probe2.yml',
            str(path) +
            '/rec_to_nwb/rec_to_nwb/test/processing/res/probe3.yml'
        ])
    builder = RawToNWBBuilder(
        animal_name='beans',
        data_path=str(path) + '/rec_to_nwb/rec_to_nwb/test/test_data/',
        dates=['20190718'],
        nwb_metadata=metadata,
        output_path='',
        extract_spikes=False,
        extract_mda=True,
        extract_lfps=False,
        extract_analog=True,
        extract_dio=True,
        overwrite=True,
        trodes_rec_export_args=_DEFAULT_TRODES_REC_EXPORT_ARGS,
        video_path=str(path) + '/rec_to_nwb/rec_to_nwb/test/test_data')
    builder.build_nwb()

    self.assertTrue(os.path.exists('beans20190718.nwb'))
    with pynwb.NWBHDF5IO('beans20190718.nwb', 'r',
                         load_namespaces=True) as nwb_file_handler:
        nwb_file = nwb_file_handler.read()
        print(nwb_file)

    if os.path.isfile('beans20190718.nwb'):
        os.remove('beans20190718.nwb')
def test_embed_spike_times_into_nwb(tmpdir_factory):
    sweep_spike_times = {
        3: [56.0, 44.6, 661.1],
        4: [156.0, 144.6, 61.1, 334.944]
    }

    tmp_dir = tmpdir_factory.mktemp("embed_spikes_into_nwb")
    input_nwb_file_name = str(tmp_dir.join("input.nwb"))
    output_nwb_file_name = str(tmp_dir.join("output.nwb"))

    make_skeleton_nwb2_file(input_nwb_file_name)

    append_spike_times(input_nwb_file_name,
                       sweep_spike_times,
                       output_nwb_path=output_nwb_file_name)

    with pynwb.NWBHDF5IO(output_nwb_file_name, mode='r',
                         load_namespaces=True) as nwb_io:
        nwbfile = nwb_io.read()
        spikes = nwbfile.get_processing_module('spikes')
        for sweep_num, spike_times in sweep_spike_times.items():
            sweep_spikes = spikes.get_data_interface(
                f"Sweep_{sweep_num}").timestamps
            assert np.allclose(sweep_spikes, spike_times)
def test_cli_nwb2(cli_runner):
    in_nwb_path, out_nwb_path = simple_nwb(cli_runner.tmpdir)

    input_json = {
        "metadata": [
            {
                "name": "subject_id",
                "value": "23",
                "sinks": ["nwb2"]
            }
        ],
        "nwb2_sinks": [
            {
                "name": "nwb2",
                "config": {"nwb_path": in_nwb_path},
                "targets": [
                    {"output_path": out_nwb_path}
                ]
            }
        ]
    }
    out_json = cli_runner.run(input_json)

    os.remove(in_nwb_path)  # make sure we aren't linking

    obt_nwb_path = out_json["sinks"]["nwb2"]["targets"][0]["output_path"]
    with pynwb.NWBHDF5IO(path=obt_nwb_path, mode="r",
                         load_namespaces=True) as reader:
        obt = reader.read()
        assert obt.subject.subject_id == "23"
        assert np.allclose(
            obt.get_acquisition("a timeseries").data[:],
            [1, 2, 3]
        )
def make(self, key):
    nwb_file_name = key['nwb_file_name']
    nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name)
    self.insert1({'nwb_file_name': nwb_file_name}, skip_duplicates=True)
    with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:
        nwbf = io.read()
        for e in nwbf.experimenter:
            # check to see if the experimenter is in the lab member list,
            # and if not add her / him
            if {'lab_member_name': e} not in LabMember():
                names = [x.strip() for x in e.split(' ')]
                labmember_dict = dict()
                labmember_dict['lab_member_name'] = e
                if len(names) == 2:
                    labmember_dict['first_name'] = names[0]
                    labmember_dict['last_name'] = names[1]
                else:
                    print('Warning: experimenter {} does not seem to have '
                          'a first and last name'.format(e))
                    labmember_dict['first_name'] = 'unknown'
                    labmember_dict['last_name'] = 'unknown'
                LabMember().insert1(labmember_dict)
            # now insert the experimenter, which is a combination of the
            # nwbfile and the name
            ExperimenterList().Experimenter().insert1({
                'nwb_file_name': nwb_file_name,
                'lab_member_name': e
            })
def mies_nwb_data(tmp_nwb_path):
    nwbfile = nwbfile_to_test()
    print(tmp_nwb_path)
    with pynwb.NWBHDF5IO(path=tmp_nwb_path, mode="w") as writer:
        writer.write(nwbfile)

    ontology = StimulusOntology(
        [[('name', 'expected name'), ('code', 'STIMULUS_CODE')],
         [('name', 'test name'), ('code', 'extpexpend')]])

    class Notebook(LabNotebookReader):

        def get_value(self, key, sweep_num, default):
            return {
                ("Scale Factor", 4): 200.0,
                ("Set Sweep Count", 4): "1"
            }.get((key, sweep_num), default)

    fake_notebook = Notebook()

    return MIESNWBData(nwb_file=tmp_nwb_path,
                       notebook=fake_notebook,
                       ontology=ontology)
def __init__(self, nwb_file):
    self.nwb_file = nwb_file
    self.nwb_root = h5py.File(self.nwb_file, 'r')
    io = pynwb.NWBHDF5IO(self.nwb_file, 'r')
    self.nwb_session = io.read()
    self._stimulus_search = None
def f(nwbfile, api_cls, **api_kwargs):
    tmpdir = str(tmpdir_factory.mktemp('nwb_roundtrip_tests'))
    nwb_path = os.path.join(tmpdir, 'nwbfile.nwb')

    with pynwb.NWBHDF5IO(nwb_path, 'w') as write_io:
        write_io.write(nwbfile)

    return api_cls(nwb_path, **api_kwargs)
def validate(path, devel_debug=False):
    """Run validation on a file and return errors

    In case of an exception being thrown, an error message is added to the
    returned list of validation errors

    Parameters
    ----------
    path: str or Path
    """
    path = str(path)  # Might come in as pathlib's PATH
    try:
        with pynwb.NWBHDF5IO(path, "r", load_namespaces=True) as reader:
            errors = pynwb.validate(reader)
        lgr.warning(
            "pynwb validation errors for %s: %s",
            path,
            errors,
            extra={"validating": True},
        )
    except Exception as exc:
        if devel_debug:
            raise
        lgr.warning("Failed to validate %s: %s", path, exc,
                    extra={"validating": True})
        errors = [f"Failed to validate {path}: {exc}"]

    # To overcome
    #   https://github.com/NeurodataWithoutBorders/pynwb/issues/1090
    #   https://github.com/NeurodataWithoutBorders/pynwb/issues/1091
    re_ok_prior_210 = re.compile(
        r"general/(experimenter|related_publications)\): "
        r"incorrect shape - expected an array of shape .\[None\]."
    )
    try:
        version = get_nwb_version(path, sanitize=False)
    except Exception:
        # we just will not remove any errors; a version is required,
        # so there should be one
        pass
    else:
        if version is not None:
            # Explicitly sanitize so we collect warnings.
            # TODO: later cast into proper ERRORs
            version = _sanitize_nwb_version(version, log=errors.append)
            loosever = LooseVersion(version)
            if loosever and loosever < "2.1.0":
                errors_ = errors[:]
                errors = [
                    e for e in errors if not re_ok_prior_210.search(str(e))
                ]
                if errors != errors_:
                    lgr.debug(
                        "Filtered out %d validation errors on %s",
                        len(errors_) - len(errors),
                        path,
                    )
    return errors
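# A minimal sketch of calling validate above (the path is illustrative).
# An empty list means no problems were reported; exceptions raised while
# reading are folded into the returned messages rather than propagated.
for message in validate("sub-01_ses-01.nwb"):
    print(message)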
def make(self, key):
    # These imports must go here to avoid cyclic dependencies
    from .common_task import Task, TaskEpoch
    from .common_interval import IntervalList
    # from .common_ephys import Unit

    nwb_file_name = key['nwb_file_name']
    nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
    with pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r') as io:
        nwbf = io.read()
        print('Institution...')
        Institution().insert_from_nwbfile(nwbf)
        print('Lab...')
        Lab().insert_from_nwbfile(nwbf)
        print('LabMember...')
        LabMember().insert_from_nwbfile(nwbf)
        print('Subject...')
        Subject().insert_from_nwbfile(nwbf)
        print('DataAcquisitionDevice...')
        DataAcquisitionDevice().insert_from_nwbfile(nwbf)
        print('CameraDevice...')
        CameraDevice().insert_from_nwbfile(nwbf)
        print('Probe...')
        Probe().insert_from_nwbfile(nwbf)
        self.insert1(
            {
                'nwb_file_name': nwb_file_name,
                'subject_id': nwbf.subject.subject_id,
                'institution_name': nwbf.institution,
                'lab_name': nwbf.lab,
                'session_id': nwbf.session_id
                if nwbf.session_id is not None else 'tmp_id',
                'session_description': nwbf.session_description,
                'session_start_time': nwbf.session_start_time,
                'timestamps_reference_time': nwbf.timestamps_reference_time,
                'experiment_description': nwbf.experiment_description
            },
            skip_duplicates=True)
        print('Skipping Apparatus for now...')
        # Apparatus().insert_from_nwbfile(nwbf)
        print('IntervalList...')
        IntervalList().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name)
def create(self, nwb_file_name):
    '''
    Opens the input NWB file, creates a copy, writes out the copy to disk
    and returns the name of the new file
    :param nwb_file_name: str
    :return: analysis_file_name: str
    '''
    nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)
    io = pynwb.NWBHDF5IO(path=nwb_file_abspath, mode='r')
    nwbf = io.read()

    # pop off the unnecessary elements to save space
    nwb_fields = nwbf.fields
    for field in nwb_fields:
        if field not in nwb_keep_fields:
            nwb_object = getattr(nwbf, field)
            if type(nwb_object) is pynwb.core.LabelledDict:
                for module in list(nwb_object.keys()):
                    mod = nwb_object.pop(module)

    key = dict()
    key['nwb_file_name'] = nwb_file_name
    # get the current number of analysis files related to this nwb file
    n_analysis_files = len((AnalysisNwbfile() &
                            {'parent_nwb_file': nwb_file_name}).fetch())
    # name the file, adding the number of files with preceding zeros
    analysis_file_name = os.path.splitext(nwb_file_name)[0] + '_' + \
        str(n_analysis_files).zfill(8) + '.nwb'
    key['analysis_file_name'] = analysis_file_name
    key['analysis_file_description'] = ''
    # write the new file
    print(f'writing new NWB file {analysis_file_name}')
    analysis_file_abs_path = AnalysisNwbfile.get_abs_path(analysis_file_name)
    key['analysis_file_abs_path'] = analysis_file_abs_path
    # export the new NWB file
    with pynwb.NWBHDF5IO(path=analysis_file_abs_path,
                         mode='w') as export_io:
        export_io.export(io, nwbf)
    io.close()
    # insert the new file
    self.insert1(key)
    return analysis_file_name
def make_nwb_file(filename, *args, cache_spec=False, **kwargs):
    """A little helper to produce an .nwb file in the path using NWBFile

    Note: it doesn't cache_spec by default
    """
    nwbfile = pynwb.NWBFile(*args, **kwargs)
    with pynwb.NWBHDF5IO(filename, "w") as io:
        io.write(nwbfile, cache_spec=cache_spec)
    return filename
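# Usage sketch for make_nwb_file above, passing the three arguments
# pynwb.NWBFile requires (the values here are placeholders).
from datetime import datetime, timezone

make_nwb_file(
    "simple1.nwb",
    session_description="a simple session",
    identifier="simple1",
    session_start_time=datetime(2020, 1, 1, tzinfo=timezone.utc),
)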