def append_member_to_h5(self, h5like_name, obj):
    """Add one group or one dataset to :attr:`h5f`"""
    h5_name = self.h5path + h5like_name.lstrip("/")
    if is_softlink(obj):
        # links to be created after all groups and datasets
        h5_target = self.h5path + obj.path.lstrip("/")
        self._links.append((h5_name, h5_target))
    elif is_dataset(obj):
        _logger.debug("Saving dataset: " + h5_name)
        member_initially_exists = h5_name in self._h5f
        if self.overwrite_data and member_initially_exists:
            _logger.warning("Overwriting dataset: " + h5_name)
            del self._h5f[h5_name]
        if self.overwrite_data or not member_initially_exists:
            if fabioh5 is not None and \
                    isinstance(obj, fabioh5.FrameData) and \
                    len(obj.shape) > 2:
                # special case of multiframe data:
                # write frame by frame to keep memory usage low
                ds = self._h5f.create_dataset(h5_name,
                                              shape=obj.shape,
                                              dtype=obj.dtype,
                                              **self.create_dataset_args)
                for i, frame in enumerate(obj):
                    ds[i] = frame
            else:
                # fancy arguments don't apply to small datasets
                if obj.size < self.min_size:
                    ds = self._h5f.create_dataset(h5_name, data=obj.value)
                else:
                    ds = self._h5f.create_dataset(h5_name, data=obj.value,
                                                  **self.create_dataset_args)
        else:
            ds = self._h5f[h5_name]
        # add HDF5 attributes
        for key in obj.attrs:
            if self.overwrite_data or key not in ds.attrs:
                ds.attrs.create(key, _attr_utf8(obj.attrs[key]))
        if not self.overwrite_data and member_initially_exists:
            _logger.warning("Not overwriting existing dataset: " + h5_name)
    elif is_group(obj):
        if h5_name not in self._h5f:
            _logger.debug("Creating group: " + h5_name)
            grp = self._h5f.create_group(h5_name)
        else:
            grp = self._h5f[h5_name]
        # add HDF5 attributes
        for key in obj.attrs:
            if self.overwrite_data or key not in grp.attrs:
                grp.attrs.create(key, _attr_utf8(obj.attrs[key]))
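
# Usage sketch (an assumption, not part of the snippet above): a method like
# append_member_to_h5 is normally driven by a visitor walking an h5py-like
# input file.  If this snippet matches silx.io.convert.Hdf5Writer, the public
# entry point is silx.io.convert.write_to_h5; "scan.dat" and "scan.h5" below
# are placeholder file names.
from silx.io.convert import write_to_h5

write_to_h5("scan.dat", "scan.h5", h5path="/", mode="a",
            overwrite_data=False, min_size=500)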
def isDataset(item):
    """Return True if item is a dataset (a native Dataset instance
    or any h5py-like dataset recognised by is_dataset)."""
    return isinstance(item, Dataset) or is_dataset(item)
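
# Minimal check for isDataset() (hedged sketch): assumes Dataset above is
# h5py.Dataset and is_dataset comes from silx.io.utils.  The in-memory
# "core" driver keeps the demo off the disk.
import h5py
import numpy

with h5py.File("demo.h5", "w", driver="core", backing_store=False) as h5f:
    ds = h5f.create_dataset("data", data=numpy.arange(3))
    assert isDataset(ds)         # a real dataset
    assert not isDataset(h5f)    # the file object is not a dataset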
def _loadFromFile(self):
    stack = self.getStackDataObject()
    if stack is None:
        return
    mcaIndex = stack.info.get('McaIndex')
    if mcaIndex not in [0, -1, 2]:
        raise IndexError("1D index must be 0, 2, or -1")
    # test io dependencies
    if h5py is None:
        filefilter = []
    else:
        filefilter = ['HDF5 (*.h5 *.nxs *.hdf *.hdf5)']
    filefilter.append('CSV (*.csv *.txt)')
    if silx_open is not None:
        filefilter.append('Any (*)')
    filename, ffilter = PyMcaFileDialogs.\
                        getFileList(parent=None,
                                    filetypelist=filefilter,
                                    message='Load',
                                    mode='OPEN',
                                    single=True,
                                    getfilter=True,
                                    currentfilter=filefilter[0])
    if len(filename):
        _logger.debug("file name = %s file filter = %s", filename, ffilter)
    else:
        _logger.debug("nothing selected")
        return
    filename = filename[0]

    positioners = {}
    if not ffilter.startswith('CSV'):
        h5GroupName = getGroupNameDialog(filename)
        if h5GroupName is None:
            return
        with h5open(filename) as h5f:
            h5Group = h5f[h5GroupName]
            for dsname in h5Group:
                # links and subgroups are ignored for the time being
                if not is_dataset(h5Group[dsname]):
                    continue
                positioners[dsname] = h5Group[dsname][()]
    else:
        sf = specfilewrapper.Specfile(filename)
        scan = sf[0]
        labels = scan.alllabels()
        data = scan.data()
        scan = None
        sf = None
        for i, label in enumerate(labels):
            positioners[label] = data[i, :]

    self._stackWindow.setPositioners(positioners)
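
# Hedged sketch of the HDF5 branch above in isolation: read every dataset of
# a group into a {name: array} dict, skipping links and subgroups.  Assumes
# is_dataset from silx.io.utils; "scan.h5" and "/entry/positioners" are
# placeholder names.
import h5py
from silx.io.utils import is_dataset

positioners = {}
with h5py.File("scan.h5", "r") as h5f:
    group = h5f["/entry/positioners"]
    for dsname in group:
        if not is_dataset(group[dsname]):
            continue
        positioners[dsname] = group[dsname][()]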
def validate_auxiliary_signals(group, signal_name, auxiliary_signals_names):
    """Check that every auxiliary signal is a dataset with the same shape
    as the main signal.

    Return a list of issue messages (empty if everything is valid).
    """
    issues = []
    for asn in auxiliary_signals_names:
        if asn not in group or not is_dataset(group[asn]):
            issues.append("Cannot find auxiliary signal dataset '%s'" % asn)
        elif group[signal_name].shape != group[asn].shape:
            issues.append("Auxiliary signal dataset '%s' does not have "
                          "the same shape as the main signal." % asn)
    return issues
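
# Hedged self-test for validate_auxiliary_signals() on an in-memory file.
# Assumes h5py is available and is_dataset behaves like
# silx.io.utils.is_dataset; the names and shapes are illustrative.
import h5py
import numpy

with h5py.File("nxdata.h5", "w", driver="core", backing_store=False) as h5f:
    grp = h5f.create_group("entry")
    grp["signal"] = numpy.zeros((4, 5))
    grp["aux_ok"] = numpy.ones((4, 5))
    grp["aux_bad"] = numpy.ones(3)    # deliberately wrong shape
    issues = validate_auxiliary_signals(grp, "signal",
                                        ["aux_ok", "aux_bad", "missing"])
    for issue in issues:
        print(issue)   # one message for "aux_bad", one for "missing"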
def visit_function(name, obj):
    if is_dataset(obj):
        append = False
        forget = False
        namebased = False
        for key, value in obj.attrs.items():
            if key == "interpretation":
                if value in ["spectrum", b"spectrum"]:
                    append = True
                else:
                    forget = True
        if (not append) and (not forget):
            # support (risky) name-based solutions too:
            # the dataset name starts with MCA, or
            # the parent group starts with MCA
            if posixpath.basename(name).lower().startswith("mca") or \
               posixpath.basename(posixpath.dirname(name)).lower().startswith("mca"):
                append = True
                namebased = True
        if append:
            # an actual MCA spectrum will have more than one channel
            if (not namebased) and ("measurement" in name):
                # ALBA sets the interpretation attribute to spectrum
                # for every counter in the measurement group
                if len(obj.shape) == 1:
                    # figure out whether this is in fact just a
                    # misuse of the interpretation attribute
                    posnames = getScannedPositioners(h5file, path)
                    for motor in posnames:
                        if h5file[motor].size == obj.size:
                            append = False
            if append:
                # perform some name filtering
                if posixpath.basename(obj.name).lower() in ignore:
                    append = False
            if append:
                # require more than one channel
                if len(obj.shape) > 0:
                    if obj.shape[-1] > 1:
                        if dataset:
                            datasetList.append(obj)
                        else:
                            datasetList.append(obj.name)
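
# Hedged driver sketch: a visitor with this (name, obj) signature is meant
# for h5py's Group.visititems().  The names it closes over (h5file, path,
# ignore, dataset, datasetList, getScannedPositioners) belong to the
# enclosing function in the original code; the bindings below are
# illustrative assumptions only, and "scan.h5" is a placeholder file name.
import h5py
import posixpath

from PyMca5.PyMcaCore.NexusTools import getScannedPositioners

path = "/"
ignore = ["icr", "ocr", "live_time", "elapsed_time"]  # hypothetical skip list
dataset = False          # collect dataset names rather than dataset objects
datasetList = []
with h5py.File("scan.h5", "r") as h5file:
    h5file[path].visititems(visit_function)
print(datasetList)       # paths of datasets that look like MCA spectra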