def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''
    Describe how to store this data in an HDF5 NeXus file.

    Writes each MCA ROI (region of interest) as a two-element dataset
    ``[first_chan, last_chan]`` under ``/MCA/ROI`` in *h5parent*.
    '''
    if hasattr(scan, 'MCA'):
        # BUG FIX: the guard was ``hasattr(scan.MCA, 'ROI')`` but the body
        # reads ``scan.MCA['ROI']`` by item access.  The sibling writers test
        # membership (``'CALIB' in scan.MCA``, ``key in mca``), which shows
        # scan.MCA is dict-like; for a plain dict the hasattr() test is
        # always False, so ROI data was silently never written.
        # NOTE(review): assumes scan.MCA supports ``in`` like its siblings do.
        if 'ROI' in scan.MCA:
            mca_group = openGroup(
                h5parent, 'MCA', nxclass, description='MCA metadata')
            roi_group = openGroup(
                mca_group, 'ROI', nxclass, description='Regions Of Interest')
            roi_dict = scan.MCA['ROI']
            for key, roi in roi_dict.items():
                # store channel span as a small [first, last] dataset
                dataset = [roi['first_chan'], roi['last_chan']]
                desc = 'first_chan, last_chan'
                write_dataset(
                    roi_group, key, dataset, description=desc, units='channel')
def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''Describe how to store this data in an HDF5 NeXus file'''
    # Nothing to do unless this scan carried MCA metadata.
    if not hasattr(scan, 'MCA'):
        return
    mca_group = openGroup(
        h5parent, 'MCA', nxclass, description='MCA metadata')
    mca = scan.MCA
    # Timing fields, all recorded in seconds.
    for field in ('preset_time', 'elapsed_live_time', 'elapsed_real_time'):
        if field in mca:
            write_dataset(mca_group, field, mca[field], units='s')
def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''Describe how to store this data in an HDF5 NeXus file'''
    # Nothing to do unless this scan carried MCA metadata.
    if not hasattr(scan, 'MCA'):
        return
    mca_group = openGroup(
        h5parent, 'MCA', nxclass, description='MCA metadata')
    mca = scan.MCA
    # Channel bookkeeping fields (unitless), written only when present.
    for field in ('number_saved', 'first_saved', 'last_saved',
                  'reduction_coef'):
        if field in mca:
            write_dataset(mca_group, field, mca[field])
def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''Describe how to store this data in an HDF5 NeXus file'''
    # Guard clauses: only act when MCA calibration data is present.
    if not hasattr(scan, 'MCA'):
        return
    if 'CALIB' not in scan.MCA:
        return
    mca_group = openGroup(
        h5parent, 'MCA', nxclass, description='MCA metadata')
    calibration = scan.MCA['CALIB']
    # Write each available calibration coefficient as ``calib_<name>``.
    for coefficient in ('a', 'b', 'c'):
        if coefficient in calibration:
            write_dataset(
                mca_group, 'calib_' + coefficient, calibration[coefficient])
def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''Describe how to store this data in an HDF5 NeXus file'''
    if hasattr(scan, 'MCA'):
        mca_group = openGroup(
            h5parent, 'MCA', nxclass, description='MCA metadata')
        mca = scan.MCA
        # All of these are time measurements, recorded in seconds.
        timing_fields = (
            'preset_time',
            'elapsed_live_time',
            'elapsed_real_time',
        )
        for name in timing_fields:
            if name in mca:
                write_dataset(mca_group, name, mca[name], units='s')
def writer(self, h5parent, writer, scan, nxclass=None, *args, **kws):
    '''Describe how to store this data in an HDF5 NeXus file'''
    if hasattr(scan, 'MCA'):
        mca_group = openGroup(
            h5parent, 'MCA', nxclass, description='MCA metadata')
        mca = scan.MCA
        # Unitless channel/reduction bookkeeping values, if recorded.
        bookkeeping_fields = (
            'number_saved',
            'first_saved',
            'last_saved',
            'reduction_coef',
        )
        for name in bookkeeping_fields:
            if name in mca:
                write_dataset(mca_group, name, mca[name])
def save(self, hfile=None, key=None):
    '''
    save the reduced data group to an HDF5 file, return filename or None if not written

    :param str hfile: output HDF5 file name (default: input HDF5 file)
    :param str key: name of reduced data set (default: nothing will be saved)

    By default, save to the input HDF5 file.
    To override this, specify the output HDF5 file name when calling
    this method.

    * If the file exists, this will not overwrite any input data.
    * Full, reduced :math:`R(Q)` goes into NXdata group::

        /entry/flyScan_reduced_full

    * any previous full reduced :math:`R(Q)` will be replaced.
    * It may replace the rebinned, reduced :math:`R(Q)`
      if a NXdata group of the same number of bins exists.
    * Rebinned, reduced :math:`R(Q)` goes into NXdata group::

        /entry/flyScan_reduced_<N>

      where ``<N>`` is the number of bins, such as (for 500 bins)::

        /entry/flyScan_reduced_500

    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXentry.html
    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXdata.html
    '''
    # TODO: save with NXprocess/NXdata structure
    # TODO: link that NXdata up to NXentry level
    # TODO: change /NXentry@default to point to best NXdata reduced
    # TODO: What about NXcanSAS?
    key = str(key)
    if key not in self.reduced:
        return  # nothing selected, nothing written
    nxname = 'flyScan_reduced_' + key
    hfile = hfile or self.hdf5_file_name
    ds = self.reduced[key]
    with h5py.File(hfile, 'a') as hdf:
        # Mark default plottable entry/data per the NeXus convention,
        # but never clobber an existing choice.
        if 'default' not in hdf.attrs:
            hdf.attrs['default'] = 'entry'
        nxentry = eznx.openGroup(hdf, 'entry', 'NXentry')
        if 'default' not in nxentry.attrs:
            nxentry.attrs['default'] = nxname
        nxdata = eznx.openGroup(
            nxentry,
            nxname,
            'NXdata',
            signal='R',
            axes='Q',
            Q_indices=0,
            timestamp=calc.iso8601_datetime(),
        )
        # FIX: loop variable renamed so it no longer shadows the ``key``
        # parameter used above to build ``nxname``.
        for name in sorted(ds):
            try:
                _ds = eznx.write_dataset(nxdata, name, ds[name])
                if name in self.units:
                    eznx.addAttributes(_ds, units=self.units[name])
            except RuntimeError:
                # deliberate best-effort: skip any dataset h5py rejects
                pass  # TODO: reporting
    return hfile
def save(self, hfile=None, key=None):
    '''
    save the reduced data group to an HDF5 file, return filename or None if not written

    :param str hfile: output HDF5 file name (default: input HDF5 file)
    :param str key: name of reduced data set (default: nothing will be saved)

    By default, save to the input HDF5 file.
    To override this, specify the output HDF5 file name when calling
    this method.

    * If the file exists, this will not overwrite any input data.
    * Full, reduced :math:`R(Q)` goes into NXdata group::

        /entry/areaDetector_reduced_full

    * any previous full reduced :math:`R(Q)` will be replaced.
    * It may replace the rebinned, reduced :math:`R(Q)`
      if a NXdata group of the same number of bins exists.
    * Rebinned, reduced :math:`R(Q)` goes into NXdata group::

        /entry/areaDetector_reduced_<N>

      where ``<N>`` is the number of bins, such as (for 500 bins)::

        /entry/areaDetector_reduced_500

    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXentry.html
    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXdata.html
    '''
    key = str(key)
    if key not in self.reduced:
        return  # nothing selected, nothing written
    nxname = 'areaDetector_reduced_' + key
    hfile = hfile or self.hdf5_file_name
    ds = self.reduced[key]
    try:
        hdf = h5py.File(hfile, 'a')
    except IOError as _exc:
        # FIXME: some h5py problem in <h5py>/_hl/files.py, line 101
        # this fails: fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl)
        # with IOError that is improperly caught on next and then:
        # fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl) fails with IOError
        # since the second call has "name" with all lower case
        #
        # real problem is that these HDF5 files have the wrong uid/gid, as set by the Pilatus computer
        # TODO: fix each Pilatus and this problem will go away
        # TODO: change uid/gid on all the acquired HDF5 files (*.h5, *.hdf) under usaxscontrol:/share1/USAXS_data/2*
        # Files should be owned by usaxs:usaxs (1810:2026), but are owned by tomo2:usaxs (500:2026) as seen by usaxs@usaxscontrol
        # not enough to change the "umask" on the det@dec1122 computer, what else will fix this?
        pvwatch.logMessage(
            "Problem writing reduced data back to file: " + hfile
        )
        return
    # FIX: the file was previously closed only on the success path; an
    # exception raised while writing leaked the open HDF5 handle.  The
    # try/finally guarantees the close (matching the flyScan save(),
    # which uses a context manager).
    try:
        # Mark default plottable entry/data per the NeXus convention,
        # but never clobber an existing choice.
        if 'default' not in hdf.attrs:
            hdf.attrs['default'] = 'entry'
        nxentry = eznx.openGroup(hdf, 'entry', 'NXentry')
        if 'default' not in nxentry.attrs:
            nxentry.attrs['default'] = nxname
        nxdata = eznx.openGroup(
            nxentry,
            nxname,
            'NXdata',
            signal='R',
            axes='Q',
            Q_indices=0,
            timestamp=calc.iso8601_datetime(),
        )
        # FIX: loop variable renamed so it no longer shadows the ``key``
        # parameter used above to build ``nxname``.
        for name in sorted(ds):
            try:
                _ds = eznx.write_dataset(nxdata, name, ds[name])
                if name in self.units:
                    eznx.addAttributes(_ds, units=self.units[name])
            except RuntimeError:
                # deliberate best-effort: skip any dataset h5py rejects
                pass  # TODO: reporting
    finally:
        hdf.close()
    return hfile