def test_hdf5(): """Test HDF5 IO """ tempdir = _TempDir() test_file = op.join(tempdir, "test.hdf5") sp = np.eye(3) if sparse is None else sparse.eye(3, 3, format="csc") sp[2, 2] = 2 x = dict( a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128), d=[dict(e=(1, -2.0, "hello", u"goodbyeu\u2764")), None], f=sp, ) write_hdf5(test_file, 1) assert_equal(read_hdf5(test_file), 1) assert_raises(IOError, write_hdf5, test_file, x) # file exists write_hdf5(test_file, x, overwrite=True) assert_raises(IOError, read_hdf5, test_file + "FOO") # not found xx = read_hdf5(test_file) assert_true(object_diff(x, xx) == "") # no assert_equal, ugly output # bad title assert_raises(ValueError, read_hdf5, test_file, title="nonexist") assert_raises(ValueError, write_hdf5, test_file, x, overwrite=True, title=1) assert_raises(ValueError, read_hdf5, test_file, title=1) # unsupported objects assert_raises(TypeError, write_hdf5, test_file, {1: "foo"}, overwrite=True) assert_raises(TypeError, write_hdf5, test_file, object, overwrite=True)
def test_path_support():
    tempdir = _TempDir()
    test_file = op.join(tempdir, 'test.hdf5')
    write_hdf5(test_file, 1, title='first')
    write_hdf5(test_file, 2, title='second/third', overwrite='update')
    assert_raises(ValueError, read_hdf5, test_file, title='second')
    assert_equal(read_hdf5(test_file, 'first'), 1)
    assert_equal(read_hdf5(test_file, 'second/third'), 2)
def test_path_support(tmpdir):
    tempdir = str(tmpdir)
    test_file = op.join(tempdir, 'test.hdf5')
    write_hdf5(test_file, 1, title='first')
    write_hdf5(test_file, 2, title='second/third', overwrite='update')
    pytest.raises(ValueError, read_hdf5, test_file, title='second')
    assert_equal(read_hdf5(test_file, 'first'), 1)
    assert_equal(read_hdf5(test_file, 'second/third'), 2)
def read_markers(fname):
    contents = h5_listdir(fname)
    markers = list()
    epochs = None
    if 'nice/markers/order' in contents:
        marker_order = read_hdf5(fname, title='nice/markers/order',
                                 slash='replace')
    else:
        marker_order = [k for k in contents if 'nice/marker/' in k]
    if any('nice/data/epochs' in k for k in contents):
        epochs = read_hdf5(fname, title='nice/data/epochs', slash='replace')
        # MNE fix
        if 'filename' in epochs['info']:
            del epochs['info']['filename']
        epochs = mne.EpochsArray(
            data=epochs.pop('_data'), info=Info(epochs.pop('info')),
            tmin=epochs.pop('tmin'), event_id=epochs.pop('event_id'),
            events=epochs.pop('events'), reject=epochs.pop('reject'),
            flat=epochs.pop('flat'))
    # Read all PowerSpectralDensityEstimator estimators
    estimators = [k for k in contents
                  if 'nice/container/PowerSpectralDensityEstimator' in k]
    all_estimators = {}
    for estimator_name in estimators:
        estimator_comment = estimator_name.split('/')[-1]
        this_estimator = read_psd_estimator(fname, comment=estimator_comment)
        all_estimators[estimator_comment] = this_estimator
    for content in marker_order:
        _, _, my_class_name, comment = content.split('/')
        my_class = _markers_classes[my_class_name]
        if issubclass(my_class, BaseTimeLocked):
            if not epochs:
                raise RuntimeError(
                    'Something weird has happened. You want to read a '
                    'marker that depends on epochs but '
                    'I could not find any epochs in the file you gave me.')
            markers.append(my_class._read(fname, epochs, comment=comment))
        elif issubclass(my_class, BasePowerSpectralDensity):
            markers.append(
                my_class._read(fname, estimators=all_estimators,
                               comment=comment))
        elif issubclass(my_class, BaseMarker):
            markers.append(my_class._read(fname, comment=comment))
        else:
            raise ValueError('Come on--this is not a Nice class!')
    markers = Markers(markers)
    return markers
def _read_my_marker(klass, fname, comment='default'):
    # MANDATORY: This method should work for any marker as it is now.
    data = read_hdf5(fname, _get_title(klass, comment), slash='replace')
    init_params = {k: v for k, v in data.items() if not k.endswith('_')}
    attrs = {k: v for k, v in data.items() if k.endswith('_')}
    file_info = read_hdf5(fname, title='nice/data/ch_info', slash='replace')
    if 'filename' in file_info:
        del file_info['filename']
    attrs['ch_info_'] = Info(file_info)
    out = klass(**init_params)
    for k, v in attrs.items():
        if k.endswith('_'):
            setattr(out, k, v)
    return out
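# A minimal, self-contained sketch (hypothetical data dict) of the naming
# convention _read_my_marker relies on: keys ending in '_' are fitted
# attributes set after construction, while all other keys are passed to
# the marker's __init__.
data = {'tmin': 0.0, 'tmax': 0.5, 'data_': [1, 2, 3]}
init_params = {k: v for k, v in data.items() if not k.endswith('_')}
attrs = {k: v for k, v in data.items() if k.endswith('_')}
assert init_params == {'tmin': 0.0, 'tmax': 0.5}
assert attrs == {'data_': [1, 2, 3]}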
def test_numpy_values():
    tempdir = _TempDir()
    test_file = op.join(tempdir, 'test.hdf5')
    for cast in [np.int8, np.int16, np.int32, np.int64, np.bool_,
                 np.float16, np.float32, np.float64]:
        value = cast(1)
        write_hdf5(test_file, value, title='first', overwrite='update')
        assert_equal(read_hdf5(test_file, 'first'), value)
def test_numpy_values(tmpdir):
    """Test NumPy values."""
    test_file = op.join(str(tmpdir), 'test.hdf5')
    for cast in [np.int8, np.int16, np.int32, np.int64, np.bool_,
                 np.float16, np.float32, np.float64]:
        value = cast(1)
        write_hdf5(test_file, value, title='first', overwrite='update')
        assert_equal(read_hdf5(test_file, 'first'), value)
def test_hdf5_use_json():
    """Test HDF5 IO with JSON."""
    tempdir = _TempDir()
    test_file = op.join(tempdir, 'test.hdf5')
    splash_dict = {'first/second': {'one/more': 'value'}}
    pytest.raises(ValueError, write_hdf5, test_file, splash_dict,
                  overwrite=True, slash='error', use_json=True)
    spec_dict = {'first/second': 'third'}
    write_hdf5(test_file, spec_dict, overwrite=True, slash='replace',
               use_json=True)
    assert_equal(read_hdf5(test_file, slash='replace').keys(),
                 spec_dict.keys())
    in_keys = list(read_hdf5(test_file, slash='ignore').keys())
    assert '{FWDSLASH}' in in_keys[0]
    comp_dict = {'first': [1, 2], 'second': 'str', 'third': {'a': 1}}
    write_hdf5(test_file, comp_dict, overwrite=True, use_json=True)
    assert_equal(sorted(read_hdf5(test_file, slash='replace').keys()),
                 sorted(comp_dict.keys()))
    numpy_dict = {'first': np.array([1])}
    write_hdf5(test_file, numpy_dict, overwrite=True, use_json=True)
    assert_equal(list(read_hdf5(test_file, slash='replace').values())[0],
                 list(numpy_dict.values())[0])
    pytest.raises(ValueError, read_hdf5, test_file, slash='brains')
    # Testing that title slashes aren't replaced
    write_hdf5(test_file, spec_dict, title='one/two', overwrite=True,
               slash='replace', use_json=True)
    assert_equal(read_hdf5(test_file, title='one/two', slash='replace').keys(),
                 spec_dict.keys())
def __getitem__(self, item):
    """
    Get/read data from the HDF5 file

    Args:
        item (str, slice): path to the data or key of the data object

    Returns:
        dict, list, float, int: data or data object
    """
    if isinstance(item, slice):
        if not (item.start or item.stop or item.step):
            return self.values()
        raise NotImplementedError("Implement if needed, e.g. for [:]")
    else:
        item_lst = item.split("/")
        if len(item_lst) == 1 and item_lst[0] != "..":
            if item in self.list_nodes():
                obj = h5io.read_hdf5(self.file_name,
                                     title=self._get_h5_path(item))
                return obj
            if item in self.list_groups():
                with self.open(item) as hdf_item:
                    obj = hdf_item.copy()
                return obj
            raise ValueError("Unknown item: {} {} {}".format(
                item, self.file_name, self.h5_path))
        else:
            if item_lst[0] == "":
                # item starting with '/', thus we have an absolute HDF5 path
                item_abs_lst = os.path.normpath(item).replace(
                    "\\", "/").split("/")
            else:  # relative HDF5 path
                # The self.h5_path is an absolute path (/h5_path/in/h5/file);
                # however, to reach any directory super to root, we start
                # with a relative path = ./h5_path/in/h5/file and add
                # whatever we get as item. The normpath finally returns a
                # path to the item which is relative to the hdf-root.
                item_abs_lst = (os.path.normpath(
                    os.path.join('.' + self.h5_path, item)).replace(
                        "\\", "/").split("/"))
            if item_abs_lst[0] == "." and len(item_abs_lst) == 1:
                # Here, we are asked to return the root of the HDF5 file.
                # The resulting self.path would be the same as the
                # self.file_path and, thus, the path of the pyiron Project
                # this HDF5 file belongs to:
                return self.create_project_from_hdf5()
            elif item_abs_lst[0] == "..":
                # Here, we are asked to return a path super to the root of
                # the HDF5 file, a.k.a. the path of its pyiron Project, thus
                # we pass the relative path to the pyiron Project to handle:
                return self.create_project_from_hdf5()["/".join(
                    item_abs_lst)]
            else:
                hdf_object = self.copy()
                hdf_object.h5_path = "/".join(item_abs_lst[:-1])
                return hdf_object[item_abs_lst[-1]]
def _read(self, item):
    """
    Internal read function to read data from the HDF5 file

    Args:
        item (str): path to the data or key of the data object

    Returns:
        dict, list, float, int: data or data object
    """
    return h5io.read_hdf5(self.file_name, title=self._get_h5_path(item))
def load(subject, event_type):
    # Behavior
    fname = op.join(path_data, subject, 'behavior_%s.hdf5' % event_type)
    events = read_hdf5(fname)
    # add explicit conditions
    events = complete_behavior(events)
    # MEG
    fname = op.join(path_data, subject, 'epochs_%s.fif' % event_type)
    epochs = mne.read_epochs(fname)
    return epochs, events
def test_timezone(name, tmpdir):
    """Test datetime.timezone support."""
    fname = op.join(str(tmpdir), 'test.hdf5')
    kwargs = dict()
    if name is not None:
        kwargs['name'] = name
    x = datetime.timezone(datetime.timedelta(hours=-7), **kwargs)
    write_hdf5(fname, x)
    y = read_hdf5(fname)
    assert isinstance(y, datetime.timezone)
    assert y == x
    if name is not None:
        assert y.tzname(None) == name
def load(subject, event_type):
    fname = op.join(path_data, '%s/behavior_%s.hdf5' % (subject, event_type))
    events = read_hdf5(fname)
    # add explicit conditions
    events = complete_behavior(events)
    fname = op.join(path_data, '%s/epochs_%s.fif' % (subject, event_type))
    epochs = mne.read_epochs(fname)
    if event_type == 'Target':
        epochs.crop(0, .600)
    elif event_type == 'Cue':
        epochs.crop(0, .900)
    elif event_type == 'Probe':
        epochs.crop(0, .600)
    return epochs, events
def test_multi_dim_array(tmpdir):
    """Test multidimensional arrays."""
    rng = np.random.RandomState(0)
    traj = np.array([rng.randn(2, 1), rng.randn(3, 1)])
    test_file = op.join(str(tmpdir), 'test.hdf5')
    write_hdf5(test_file, traj, title='first', overwrite='update')
    for traj_read, traj_sub in zip(read_hdf5(test_file, 'first'), traj):
        assert np.equal(traj_read, traj_sub).all()
    traj_no_structure = np.array([rng.randn(2, 1, 1), rng.randn(3, 1, 2)])
    pytest.raises(ValueError, write_hdf5, test_file, traj_no_structure,
                  title='second', overwrite='update')
def test_datetime(tmpdir):
    """Test datetime.datetime support."""
    fname = op.join(str(tmpdir), 'test.hdf5')
    # Naive (note that the name ``m`` is deliberately reused for both month
    # and minute, matching the single-letter lookup below)
    y, m, d, h, m, s, mu = range(1, 8)
    dt = datetime.datetime(y, m, d, h, m, s, mu)
    for key in ('year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond'):
        val = locals()[key[:1] if key != 'microsecond' else 'mu']
        assert val == getattr(dt, key)
    assert dt.year == y
    assert dt.month == m
    write_hdf5(fname, dt)
    dt2 = read_hdf5(fname)
    assert isinstance(dt2, datetime.datetime)
    assert dt == dt2
    assert dt2.tzinfo is None
    # Aware
    dt = dt.replace(tzinfo=datetime.timezone.utc)
    write_hdf5(fname, dt, overwrite=True)
    dt2 = read_hdf5(fname)
    assert isinstance(dt2, datetime.datetime)
    assert dt == dt2
    assert dt2.tzinfo is datetime.timezone.utc
    # Custom
    dt = dt.replace(tzinfo=XT())
    write_hdf5(fname, dt, overwrite=True)
    dt2 = read_hdf5(fname)
    assert isinstance(dt2, datetime.datetime)
    assert dt == dt2
    assert dt2.tzinfo is not None
    assert dt2.tzinfo is not datetime.timezone.utc
    for key in ('utcoffset', 'tzname', 'dst'):
        v1 = getattr(dt2.tzinfo, key)(None)
        v2 = getattr(dt.tzinfo, key)(None)
        assert v1 == v2
def test_h5_file_object(tmpdir):
    tempdir = str(tmpdir)
    test_file_path = op.join(tempdir, 'test1.hdf5')
    # test that wrong object type raises error
    pytest.raises(ValueError, write_hdf5, fname=33, data=1)
    # test that reading/writing are unaffected
    with h5py.File(test_file_path, 'a') as test_file_obj:
        data = {'a': 42}
        write_hdf5(test_file_obj, data)
        assert_equal(read_hdf5(test_file_obj), data)
    # test that wrong mode raises error
    with h5py.File(test_file_path, 'r') as test_file_obj:
        assert test_file_obj.mode == 'r'
        with pytest.raises(UnsupportedOperation):
            write_hdf5(test_file_obj, data=1)
def load(subject, event_type):
    # Behavior
    fname = op.join(path_data, subject, 'behavior_%s.hdf5' % event_type)
    events = read_hdf5(fname)
    # add explicit conditions
    events = complete_behavior(events)
    # MEG
    if target_baseline:
        fname = op.join(path_data, subject,
                        'epochs_tf_%s.fif' % event_type)
    else:
        fname = op.join(path_data, subject,
                        'epochs_tf_%s_bsl.fif' % event_type)
    epochs = mne.read_epochs(fname)
    # epochs.decimate(10)
    return epochs, events
def __getitem__(self, item):
    """
    Get/read data from the HDF5 file

    Args:
        item (str, slice): path to the data or key of the data object

    Returns:
        dict, list, float, int: data or data object
    """
    if isinstance(item, slice):
        if not (item.start or item.stop or item.step):
            return self.values()
        raise NotImplementedError("Implement if needed, e.g. for [:]")
    else:
        item_lst = item.split("/")
        if len(item_lst) == 1 and item_lst[0] != "..":
            if item in self.list_nodes():
                obj = h5io.read_hdf5(self.file_name,
                                     title=self._get_h5_path(item))
                return obj
            if item in self.list_groups():
                with self.open(item) as hdf_item:
                    obj = hdf_item.copy()
                return obj
            raise ValueError("Unknown item: {}".format(item))
        else:
            if item_lst[0] == "":  # absolute HDF5 path
                item_abs_lst = os.path.normpath(item).replace(
                    "\\", "/").split("/")
            else:  # relative HDF5 path
                item_abs_lst = (os.path.normpath(
                    os.path.join(self.h5_path, item)).replace(
                        "\\", "/").split("/"))
            if item_abs_lst[1] == "" and len(item_abs_lst) == 2:
                # leaving the HDF5 file
                return self._create_project_from_hdf5()
            elif item_abs_lst[1] == "":
                return self._create_project_from_hdf5()["/".join(
                    item_abs_lst[2:])]
            else:
                hdf_object = self.copy()
                hdf_object.h5_path = "/".join(item_abs_lst[:-1])
                return hdf_object[item_abs_lst[-1]]
def read_glm(fname):
    """
    Read GLM results from disk.

    Parameters
    ----------
    fname : str
        The file name, which should end with ``glm.h5``.

    Returns
    -------
    glm : RegressionResults or ContrastResults
        RegressionResults or ContrastResults class which stores the GLM
        results.
    """
    check_fname(fname, 'path-like', 'glm.h5')
    glm = read_hdf5(fname, title='mnepython')
    return _state_to_glm(glm)
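# Hypothetical usage sketch (file name assumed): the reader only needs the
# path of a file previously written by the matching save routine, which
# stores the GLM state under the 'mnepython' title.
# glm_est = read_glm('sub-01_task-tapping_glm.h5')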
def from_hdf(fname):
    """
    Read an EpochsTFR from an HDF5 file.

    This expects an HDF5 file that was created with write_hdf5. It
    auto-populates a new EpochsTFR object. Be careful with memory
    consumption!

    Parameters
    ----------
    fname : str
        The path to the HDF5 file you wish to import.

    Returns
    -------
    etfr : EpochsTFR
        The EpochsTFR object contained in the HDF5 file.
    """
    params = h5io.read_hdf5(fname)
    etfr = EpochTFR(**params)
    return etfr
def from_hdf(self):
    """
    Restore input, output and the class definition from an HDF5 file - to
    maintain orthogonal persistence.
    """
    job_dict = h5io.read_hdf5(
        os.path.join(self._working_directory, "scisweeper.h5"))
    if "input" in job_dict.keys():
        self.input_dict = job_dict["input"]
    if "settings" in job_dict.keys():
        self._executable = job_dict["settings"]["executable"]
        self._working_directory = job_dict["settings"]["working_directory"]
        if "NotImplementedError" in inspect.getsource(self.write_input):
            self._write_input_source = job_dict["settings"]["write_input"]
            self.write_input = self._str_to_obj(self._write_input_source)
        if "NotImplementedError" in inspect.getsource(self.collect_output):
            self._collect_output_source = \
                job_dict["settings"]["collect_output"]
            self.collect_output = self._str_to_obj(
                self._collect_output_source)
    if "output" in job_dict.keys():
        self.output_dict = job_dict["output"]
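# A minimal sketch (hypothetical values) of the layout from_hdf expects in
# scisweeper.h5: 'input' and 'output' dicts plus a 'settings' group that
# stores the executable, working directory, and the source code of the
# user-defined write_input/collect_output functions.
job_dict = {
    'input': {'x': 1.0},
    'settings': {
        'executable': 'bash run.sh',
        'working_directory': '/tmp/job_0',
        'write_input': 'def write_input(input_dict, working_directory):'
                       '\n    ...',
        'collect_output': 'def collect_output(working_directory):'
                          '\n    ...',
    },
    'output': {'y': 2.0},
}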
def test_hdf5(): """Test HDF5 IO """ tempdir = _TempDir() test_file = op.join(tempdir, 'test.hdf5') sp = np.eye(3) if sparse is None else sparse.eye(3, 3, format='csc') df = np.eye(3) if isinstance(DataFrame, type(None)) else DataFrame( np.eye(3)) sr = np.eye(3) if isinstance(Series, type(None)) else Series( np.random.randn(3)) sp[2, 2] = 2 x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128), d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None], f=sp, g=dict(dfa=df, srb=sr)) write_hdf5(test_file, 1) assert_equal(read_hdf5(test_file), 1) assert_raises(IOError, write_hdf5, test_file, x) # file exists write_hdf5(test_file, x, overwrite=True) assert_raises(IOError, read_hdf5, test_file + 'FOO') # not found xx = read_hdf5(test_file) assert_true(object_diff(x, xx) == '') # no assert_equal, ugly output # bad title assert_raises(ValueError, read_hdf5, test_file, title='nonexist') assert_raises(ValueError, write_hdf5, test_file, x, overwrite=True, title=1) assert_raises(ValueError, read_hdf5, test_file, title=1) # unsupported objects assert_raises(TypeError, write_hdf5, test_file, {1: 'foo'}, overwrite=True) assert_raises(TypeError, write_hdf5, test_file, object, overwrite=True) write_hdf5(test_file, 1, title='first', overwrite=True) write_hdf5(test_file, 2, title='second', overwrite='update') assert_equal(read_hdf5(test_file, title='first'), 1) assert_equal(read_hdf5(test_file, title='second'), 2) assert_raises(IOError, write_hdf5, test_file, 3, title='second') write_hdf5(test_file, 3, title='second', overwrite='update') assert_equal(read_hdf5(test_file, title='second'), 3) write_hdf5(test_file, 5, title='second', overwrite='update', compression=5) assert_equal(read_hdf5(test_file, title='second'), 5)
def get_job_status_from_file(hdf5_file, job_name):
    if os.path.exists(hdf5_file):
        return h5io.read_hdf5(hdf5_file, job_name + "/status")
    else:
        return None
def get_job_status_from_file(hdf5_file, job_name):
    return h5io.read_hdf5(hdf5_file, job_name + '/status')
def get_hamilton_version_from_file(hdf5_file, job_name):
    return h5io.read_hdf5(hdf5_file, job_name + '/VERSION')
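# A minimal sketch (hypothetical file and job names) of the layout the two
# helpers above expect: each job is a group whose 'status' and 'VERSION'
# nodes are addressed through h5io title paths.
import h5io

h5io.write_hdf5('jobs.h5', 'finished', title='job_1/status', overwrite=True)
h5io.write_hdf5('jobs.h5', '0.1.0', title='job_1/VERSION',
                overwrite='update')
assert get_job_status_from_file('jobs.h5', 'job_1') == 'finished'
assert get_hamilton_version_from_file('jobs.h5', 'job_1') == '0.1.0'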
def test_hdf5(): """Test HDF5 IO """ tempdir = _TempDir() test_file = op.join(tempdir, 'test.hdf5') sp = np.eye(3) if sparse is None else sparse.eye(3, 3, format='csc') sp_csr = np.eye(3) if sparse is None else sparse.eye(3, 3, format='csr') df = np.eye(3) if isinstance(DataFrame, type(None)) else DataFrame( np.eye(3)) sr = np.eye(3) if isinstance(Series, type(None)) else Series( np.random.randn(3)) sp[2, 2] = 2 sp_csr[2, 2] = 2 x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128), d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None], f=sp, g=dict(dfa=df, srb=sr), h=sp_csr, i=sr, j='hi') write_hdf5(test_file, 1) assert_equal(read_hdf5(test_file), 1) assert_raises(IOError, write_hdf5, test_file, x) # file exists write_hdf5(test_file, x, overwrite=True) assert_raises(IOError, read_hdf5, test_file + 'FOO') # not found xx = read_hdf5(test_file) assert_true(object_diff(x, xx) == '') # no assert_equal, ugly output list_file_contents(test_file) # Testing the h5 listing assert_raises(TypeError, list_file_contents, sp) # Only string works write_hdf5(test_file, np.bool_(True), overwrite=True) assert_equal(read_hdf5(test_file), np.bool_(True)) # bad title assert_raises(ValueError, read_hdf5, test_file, title='nonexist') assert_raises(ValueError, write_hdf5, test_file, x, overwrite=True, title=1) assert_raises(ValueError, read_hdf5, test_file, title=1) # unsupported objects assert_raises(TypeError, write_hdf5, test_file, {1: 'foo'}, overwrite=True) assert_raises(TypeError, write_hdf5, test_file, object, overwrite=True) # special_chars spec_dict = {'first/second': 'third'} assert_raises(ValueError, write_hdf5, test_file, spec_dict, overwrite=True) assert_raises(ValueError, write_hdf5, test_file, spec_dict, overwrite=True, slash='brains') write_hdf5(test_file, spec_dict, overwrite=True, slash='replace') assert_equal( read_hdf5(test_file, slash='replace').keys(), spec_dict.keys()) in_keys = list(read_hdf5(test_file, slash='ignore').keys()) assert_true('{FWDSLASH}' in in_keys[0]) assert_raises(ValueError, read_hdf5, test_file, slash='brains') # Testing that title slashes aren't replaced write_hdf5( test_file, spec_dict, title='one/two', overwrite=True, slash='replace') assert_equal(read_hdf5(test_file, title='one/two', slash='replace').keys(), spec_dict.keys()) write_hdf5(test_file, 1, title='first', overwrite=True) write_hdf5(test_file, 2, title='second', overwrite='update') assert_equal(read_hdf5(test_file, title='first'), 1) assert_equal(read_hdf5(test_file, title='second'), 2) assert_raises(IOError, write_hdf5, test_file, 3, title='second') write_hdf5(test_file, 3, title='second', overwrite='update') assert_equal(read_hdf5(test_file, title='second'), 3) write_hdf5(test_file, 5, title='second', overwrite='update', compression=5) assert_equal(read_hdf5(test_file, title='second'), 5)
def __getitem__(self, item):
    """
    Get/read data from the HDF5 file

    Args:
        item (str, slice): path to the data or key of the data object

    Returns:
        dict, list, float, int: data or data object
    """
    if isinstance(item, slice):
        if not (item.start or item.stop or item.step):
            return self.values()
        raise NotImplementedError("Implement if needed, e.g. for [:]")
    else:
        try:
            # Fast path: a good number of accesses fetch a specific dataset
            # they know exists in the file, so there is no point in checking
            # whether item is a group or a node, or in recursing when item
            # contains '/'. In most cases read_hdf5 grabs the correct data
            # straight away, and if not we still check thoroughly below.
            # Since list_nodes()/list_groups() each open the underlying file
            # once, this reduces the number of file opens in the most likely
            # case from 2 to 1 (one to check whether the data is there and
            # one to read it) and increases it in the worst case from 1 to 2
            # (one failed read here plus one more to verify that item is not
            # a group below).
            obj = h5io.read_hdf5(self.file_name,
                                 title=self._get_h5_path(item))
            if self._is_convertable_dtype_object_array(obj):
                obj = self._convert_dtype_obj_array(obj.copy())
            return obj
        except (ValueError, OSError):
            # h5io couldn't find a dataset named item, but there might still
            # be a group with that name, which we check for in the rest of
            # the method.
            pass
        item_lst = item.split("/")
        if len(item_lst) == 1 and item_lst[0] != "..":
            # if item were in self.list_nodes() we would have caught it in
            # the fast path above
            if item in self.list_groups():
                with self.open(item) as hdf_item:
                    obj = hdf_item.copy()
                if self._is_convertable_dtype_object_array(obj):
                    obj = self._convert_dtype_obj_array(obj)
                return obj
            raise ValueError("Unknown item: {} {} {}".format(
                item, self.file_name, self.h5_path))
        else:
            if item_lst[0] == "":
                # item starting with '/', thus we have an absolute HDF5 path
                item_abs_lst = os.path.normpath(item).replace(
                    "\\", "/").split("/")
            else:  # relative HDF5 path
                # The self.h5_path is an absolute path (/h5_path/in/h5/file);
                # however, to reach any directory super to root, we start
                # with a relative path = ./h5_path/in/h5/file and add
                # whatever we get as item. The normpath finally returns a
                # path to the item which is relative to the hdf-root.
                item_abs_lst = (os.path.normpath(
                    os.path.join("." + self.h5_path, item)).replace(
                        "\\", "/").split("/"))
            if item_abs_lst[0] == "." and len(item_abs_lst) == 1:
                # Here, we are asked to return the root of the HDF5 file.
                # The resulting self.path would be the same as the
                # self.file_path and, thus, the path of the pyiron Project
                # this HDF5 file belongs to:
                return self.create_project_from_hdf5()
            elif item_abs_lst[0] == "..":
                # Here, we are asked to return a path super to the root of
                # the HDF5 file, a.k.a. the path of its pyiron Project, thus
                # we pass the relative path to the pyiron Project to handle:
                return self.create_project_from_hdf5()["/".join(
                    item_abs_lst)]
            else:
                hdf_object = self.copy()
                hdf_object.h5_path = "/".join(item_abs_lst[:-1])
                return hdf_object[item_abs_lst[-1]]
    epochs = mne.read_epochs(fname2)
    epochs.info['dev_head_t'] = dev_head_t
    if subject == 'sub01_YFAALKWR_JA':
        # missing button 2 recordings on 1st session for s01
        epochs.drop_channels(['UADC007-2104'])
    epochs_list.append(epochs)
    epochs = concatenate_epochs(epochs_list)
    fname = op.join(path_data, subject,
                    'epochs_%s%s.fif' % (event_type, suffix))
    epochs.save(fname)
    # delete epoch-1 and epoch-2 to keep only the concatenated one
    os.remove(fname1)
    os.remove(fname2)
    # concatenate behavior files
    fname1 = op.join(path_data, subject, 'behavior_%s_1.hdf5' % event_type)
    events1 = read_hdf5(fname1)
    fname2 = op.join(path_data, subject_2, 'behavior_%s_2.hdf5' % event_type)
    events2 = read_hdf5(fname2)
    frames = [events1, events2]
    events = pd.concat(frames, axis=0)
    fname = op.join(path_data, subject, 'behavior_%s.hdf5' % event_type)
    write_hdf5(fname, events, overwrite=True)
# if only one session has been acquired (one subject)
else:
    fname1 = op.join(path_data, subject,
                     'epochs_%s%s_1.fif' % (event_type, suffix))
    fname = op.join(path_data, subject,
                    'epochs_%s%s.fif' % (event_type, suffix))
    shutil.copy(fname1, fname)
    os.remove(fname1)
def test_hdf5(tmpdir):
    """Test HDF5 IO."""
    tempdir = str(tmpdir)
    test_file = op.join(tempdir, 'test.hdf5')
    sp = np.eye(3) if sparse is None else sparse.eye(3, 3, format='csc')
    sp_csr = np.eye(3) if sparse is None else sparse.eye(3, 3, format='csr')
    df = np.eye(3) if isinstance(DataFrame, type(None)) else DataFrame(
        np.eye(3))
    sr = np.eye(3) if isinstance(Series, type(None)) else Series(
        np.random.randn(3))
    sp[2, 2] = 2
    sp_csr[2, 2] = 2
    x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128),
             d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None], f=sp,
             g=dict(dfa=df, srb=sr), h=sp_csr, i=sr, j='hi')
    write_hdf5(test_file, 1)
    assert_equal(read_hdf5(test_file), 1)
    pytest.raises(IOError, write_hdf5, test_file, x)  # file exists
    write_hdf5(test_file, x, overwrite=True)
    pytest.raises(IOError, read_hdf5, test_file + 'FOO')  # not found
    xx = read_hdf5(test_file)
    assert object_diff(x, xx) == ''  # no assert_equal, ugly output
    list_file_contents(test_file)  # Testing the h5 listing
    pytest.raises(TypeError, list_file_contents, sp)  # Only string works
    write_hdf5(test_file, np.bool_(True), overwrite=True)
    assert_equal(read_hdf5(test_file), np.bool_(True))
    # bad title
    pytest.raises(ValueError, read_hdf5, test_file, title='nonexist')
    pytest.raises(ValueError, write_hdf5, test_file, x, overwrite=True,
                  title=1)
    pytest.raises(ValueError, read_hdf5, test_file, title=1)
    # unsupported objects
    pytest.raises(TypeError, write_hdf5, test_file, {1: 'foo'},
                  overwrite=True)
    pytest.raises(TypeError, write_hdf5, test_file, object, overwrite=True)
    # special_chars
    spec_dict = {'first/second': 'third'}
    pytest.raises(ValueError, write_hdf5, test_file, spec_dict,
                  overwrite=True)
    pytest.raises(ValueError, write_hdf5, test_file, spec_dict,
                  overwrite=True, slash='brains')
    write_hdf5(test_file, spec_dict, overwrite=True, slash='replace')
    assert_equal(read_hdf5(test_file, slash='replace').keys(),
                 spec_dict.keys())
    in_keys = list(read_hdf5(test_file, slash='ignore').keys())
    assert '{FWDSLASH}' in in_keys[0]
    pytest.raises(ValueError, read_hdf5, test_file, slash='brains')
    # Testing that title slashes aren't replaced
    write_hdf5(test_file, spec_dict, title='one/two', overwrite=True,
               slash='replace')
    assert_equal(read_hdf5(test_file, title='one/two', slash='replace').keys(),
                 spec_dict.keys())
    write_hdf5(test_file, 1, title='first', overwrite=True)
    write_hdf5(test_file, 2, title='second', overwrite='update')
    assert_equal(read_hdf5(test_file, title='first'), 1)
    assert_equal(read_hdf5(test_file, title='second'), 2)
    pytest.raises(IOError, write_hdf5, test_file, 3, title='second')
    write_hdf5(test_file, 3, title='second', overwrite='update')
    assert_equal(read_hdf5(test_file, title='second'), 3)
    write_hdf5(test_file, 5, title='second', overwrite='update',
               compression=5)
    assert_equal(read_hdf5(test_file, title='second'), 5)
def get_hamilton_from_file(hdf5_file, job_name):
    return h5io.read_hdf5(hdf5_file,
                          job_name + '/TYPE').split(".")[-1].split("'")[0]
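# A worked example of the parsing above, assuming (hypothetically) that the
# TYPE node stores the repr of the job class; the chained splits strip
# everything but the bare class name.
type_str = "<class 'pyiron.lammps.lammps.Lammps'>"
assert type_str.split(".")[-1].split("'")[0] == "Lammps"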
freqs = ['DC', 'delta', 'theta', 'alpha', 'beta', 'gamma']
labels = mne.read_labels_from_annot('fsaverage', 'aparc_sub')
labels = [label for label in labels if 'unknown' not in label.name]

###############################################################################
# Load data
# ---------

X, y = [list() for _ in range(len(ages))], list()
for ai, age in enumerate(ages):
    shape = None
    for mi, measure in enumerate(measures):
        fast_fname = 'genz_%s_%s_fast.h5' % (age, measure)
        if not op.isfile(fast_fname):
            print('Converting %s measure %s' % (age, measure))
            data = read_hdf5('genz_%s_%s.h5' % (age, measure))
            data = data['data_vars'][measure]['data']
            data = np.array(data)
            assert data.dtype == np.float
            write_hdf5(fast_fname, data)
        data = read_hdf5(fast_fname)
        if shape is None:
            shape = data.shape
            assert shape[-1] == 2
        assert data.shape == shape
        assert data.ndim == 4
        # data = data[freq_idx]  # only use these freqs
        # deal with reordering (undo it to restore original order)
        order = np.argsort(data[:, :, :, 0], axis=-1)
        data = data[..., 1]
        for ii in range(data.shape[0]):
plt.rcParams['font.sans-serif'] = ['Arial']
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16

all_correct = list()
cueangle_correct = list()
cuesfreq_correct = list()
cueleft_correct = list()
cueright_correct = list()
for subject in subjects:
    # Read behav file (hdf5)
    print('**********' + subject + '************')
    fname = op.join(path_data, subject, 'behavior_target.hdf5')
    events = read_hdf5(fname)
    events = complete_behavior(events)
    # Select behav perf on all trials
    isfixed = np.where(events['is_eye_fixed'] == 1)
    iscorrect = np.array(events['is_correct'])
    iscorrect_fixed = iscorrect[isfixed]
    if len(iscorrect_fixed) != 800:
        warnings.warn("Total isfixed trial is not 800")
        print('total is: ' + str(len(iscorrect_fixed)))
    perc = sum(iscorrect_fixed) / len(iscorrect_fixed)
    all_correct.append(perc)
    # behav perf only cue angle
    cue_angle = np.where(events['cue_type'] == 'angle')
    if len(cue_angle[0]) != 400:
        warnings.warn("Total trial with cue angle is not 400")