def process_SAScollimation(self, xml_parent, nx_parent):
    '''
    Convert each canSAS SAScollimation group into a NeXus NXcollimator group.

    Should these be NXslit instead?
    '''
    for sas_group in xml_parent.findall('cs:SAScollimation', self.ns):
        if not isinstance(sas_group.tag, str):
            continue  # avoid XML Comments
        nm = sas_group.attrib.get('name', 'sascollimation')
        nxcoll = eznx.makeGroup(
            nx_parent,
            self.unique_name(nm, nx_parent),
            'NXcollimator',
            canSAS_class='SAScollimation',
            canSAS_name=nm)
        # note: canSAS aperture does not map well into NXcollimator
        # might be better under SASinstrument but this is the defined location
        self.process_aperture(sas_group, nxcoll)
        for xmlnode in sas_group:
            if not isinstance(xmlnode.tag, str):
                continue  # avoid XML Comments
            if xmlnode.tag.endswith('}length'):
                ds = self.field_float(xmlnode, nxcoll)
                eznx.addAttributes(
                    ds,
                    comment='Amount/length of collimation inserted (on a SANS instrument)')
            elif xmlnode.tag.endswith('}aperture'):
                pass  # handled above
            else:
                self.process_unexpected_xml_element(xmlnode, nxcoll)
def process_SAScollimation(self, xml_parent, nx_parent):
    '''
    Write every SAScollimation group found under *xml_parent* as NXcollimator.

    Should these be NXslit instead?
    '''
    node_list = xml_parent.findall('cs:SAScollimation', self.ns)
    for sas_group in node_list:
        if isinstance(sas_group.tag, str):  # avoid XML Comments
            group_name = sas_group.attrib.get('name', 'sascollimation')
            clean_name = self.unique_name(group_name, nx_parent)
            nxcoll = eznx.makeGroup(nx_parent, clean_name, 'NXcollimator',
                                    canSAS_class='SAScollimation',
                                    canSAS_name=group_name)
            # note: canSAS aperture does not map well into NXcollimator
            # might be better under SASinstrument but this is the defined location
            self.process_aperture(sas_group, nxcoll)
            for child in sas_group:
                if not isinstance(child.tag, str):
                    continue  # avoid XML Comments
                tag = child.tag
                if tag.endswith('}length'):
                    note = 'Amount/length of collimation inserted (on a SANS instrument)'
                    ds = self.field_float(child, nxcoll)
                    eznx.addAttributes(ds, comment=note)
                elif tag.endswith('}aperture'):
                    pass  # handled above
                else:
                    self.process_unexpected_xml_element(child, nxcoll)
def _prepare_to_acquire(self): '''connect to EPICS and create the HDF5 file and structure''' # connect to EPICS PVs for pv_spec in pv_registry.values(): pv_spec.pv = epics.PV(pv_spec.pvname) # create the file for key, xture in sorted(group_registry.items()): if key == '/': # create the file and internal structure f = eznx.makeFile( self.hdf5_file_name, # the following are attributes to the root element of the HDF5 file file_name=self.hdf5_file_name, creator=__file__, creator_version=self.creator_version, creator_config_file=self.config_file, HDF5_Version=h5py.version.hdf5_version, h5py_version=h5py.version.version, ) xture.hdf5_group = f else: hdf5_parent = xture.group_parent.hdf5_group xture.hdf5_group = eznx.makeGroup(hdf5_parent, xture.name, xture.nx_class) eznx.addAttributes(xture.hdf5_group, **xture.attrib) for field in field_registry.values(): ds = eznx.makeDataset(field.group_parent.hdf5_group, field.name, [field.text]) eznx.addAttributes(ds, **field.attrib)
def copy_attributes(self, xml_parent, nx_parent):
    '''copy any XML attributes to the HDF5 object'''
    # expand the element's attribute mapping directly as HDF5 attributes
    eznx.addAttributes(nx_parent, **dict(xml_parent.attrib))
def process_collection_group(self, xml_parent, nx_parent):
    '''
    process any collection group XML element

    In NXcollection, the content does not have to be NeXus.
    Could use plain hdf5 groups and datasets, both with attributes.
    But, it's more consistent to stay in NeXus structures,
    so nest NXcollections.
    '''
    if len(xml_parent) == 0:
        # leaf element: just a text field, don't assume it is a number
        tag = ns_strip(xml_parent)
        nm = self.unique_hdf5_name(nx_parent, xml_parent, tag)
        ds = self.field_text(xml_parent, nx_parent, node_name=nm)
        if ds is not None:
            eznx.addAttributes(ds, tag=tag)
            self.copy_attributes(xml_parent, nx_parent)
        return

    for child in xml_parent:
        if len(child) == 0:
            # leaf child: recurse so it becomes a text field of nx_parent
            self.process_collection_group(child, nx_parent)
            continue
        # non-leaf child: nest another NXcollection and recurse into it
        tag = ns_strip(child)
        raw_name = self.unique_hdf5_name(
            nx_parent, child, child.attrib.get('name', tag))
        nxgroup = eznx.makeGroup(nx_parent,
                                 self.unique_name(raw_name, nx_parent),
                                 'NXcollection',
                                 canSAS_name=raw_name)
        self.copy_attributes(child, nxgroup)
        eznx.addAttributes(nxgroup, tag=tag)
        self.process_collection_group(child, nxgroup)
def preliminaryWriteFile(self): '''write all preliminary data to the file while fly scan is running''' for pv_spec in pv_registry.values(): if pv_spec.acquire_after_scan: continue if pv_spec.as_string: value = pv_spec.pv.get(as_string=True) else: value = pv_spec.pv.get() if value is [None]: value = 'no data' if not isinstance(value, numpy.ndarray): value = [value] else: if pv_spec.length_limit and pv_spec.length_limit in pv_registry: length_limit = pv_registry[pv_spec.length_limit].pv.get() if len(value) > length_limit: value = value[:length_limit] hdf5_parent = pv_spec.group_parent.hdf5_group try: ds = eznx.makeDataset(hdf5_parent, pv_spec.label, value) self._attachEpicsAttributes(ds, pv_spec.pv) eznx.addAttributes(ds, **pv_spec.attrib) except Exception as e: print "ERROR: ", pv_spec.label, value print "MESSAGE: ", e print "RESOLUTION: writing as error message string" eznx.makeDataset(hdf5_parent, pv_spec.label, [str(e)])
def process_collection_group(self, xml_parent, nx_parent):
    '''
    process any collection group XML element

    In NXcollection, the content does not have to be NeXus.
    Could use plain hdf5 groups and datasets, both with attributes.
    But, it's more consistent to stay in NeXus structures,
    so nest NXcollections.
    '''
    if len(xml_parent) != 0:
        # has children: walk them, nesting NXcollection groups as needed
        for xmlnode in xml_parent:
            if len(xmlnode):
                tag = ns_strip(xmlnode)
                nm = xmlnode.attrib.get('name', tag)
                nm = self.unique_hdf5_name(nx_parent, xmlnode, nm)
                nxgroup = eznx.makeGroup(
                    nx_parent,
                    self.unique_name(nm, nx_parent),
                    'NXcollection',
                    canSAS_name=nm)
                self.copy_attributes(xmlnode, nxgroup)
                eznx.addAttributes(nxgroup, tag=tag)
                self.process_collection_group(xmlnode, nxgroup)
            else:
                self.process_collection_group(xmlnode, nx_parent)
    else:
        # leaf element: just a text field, don't assume it is a number
        tag = ns_strip(xml_parent)
        nm = self.unique_hdf5_name(nx_parent, xml_parent, tag)
        ds = self.field_text(xml_parent, nx_parent, node_name=nm)
        if ds is not None:
            eznx.addAttributes(ds, tag=tag)
            self.copy_attributes(xml_parent, nx_parent)
def _attachEpicsAttributes(self, node, pv):
    '''attach common attributes from EPICS to the HDF5 tree node'''
    # strip any ".FIELD" suffix to reach the base record name
    record = os.path.splitext(pv.pvname)[0]
    description = epics.caget(record + '.DESC') or ''
    eznx.addAttributes(node,
                       epics_pv=pv.pvname,
                       units=pv.units or '',
                       epics_type=pv.type,
                       epics_description=description)
def write_HDF5(self, hdf5File):
    '''write the HDF5 file *while parsing* the XML file'''
    root = eznx.makeFile(hdf5File)
    eznx.addAttributes(root, creator='xml2hdf5.py')
    entries = self.process_SASentry(self.root, root)
    if entries:
        # point the NeXus default-plot chain at the first SASentry group
        eznx.addAttributes(root, default=entries[0].name.split('/')[-1])
    root.close()
def process_SASentry(self, xml_parent, nx_parent):
    '''
    process any SASentry groups

    Each canSAS SASentry becomes a NeXus NXentry group.
    Returns the list of NXentry groups created.
    '''
    nx_node_list = []
    xml_node_list = xml_parent.findall('cs:SASentry', self.ns)
    for i, sasentry in enumerate(xml_node_list):
        # default name; disambiguate with an index when several entries exist
        nm = 'sasentry'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm = sasentry.attrib.get('name', nm)
        nm_clean = self.unique_name(nm, nx_parent)
        nxentry = eznx.makeGroup(nx_parent, nm_clean, 'NXentry',
                                 canSAS_class='SASentry',
                                 canSAS_name=nm)
        nx_node_list.append(nxentry)
        eznx.makeDataset(nxentry, 'definition', 'NXcanSAS')

        # process the groups that may appear more than once
        group_list = self.process_SASdata(sasentry, nxentry)
        if len(group_list) > 0:
            # NeXus default-plot chain: point at the first SASdata group
            default = group_list[0].name.split('/')[-1]
            eznx.addAttributes(nxentry, default=default)
        self.process_Run(sasentry, nxentry)
        self.process_SAStransmission_spectrum(sasentry, nxentry)
        self.process_SASprocess(sasentry, nxentry)
        self.process_SASnote(sasentry, nxentry)

        # process any other items
        # NOTE: the "pass" branches below must stay in sync with the
        # bulk handlers called above
        for xmlnode in sasentry:
            tag = str(xmlnode.tag)
            if tag.endswith('}Title'):
                self.field_text(xmlnode, nxentry, node_name='title')
            elif tag.endswith('}Run'):
                pass  # handled above
            elif tag.endswith('}SASdata'):
                pass  # handled above
            elif tag.endswith('}SASsample'):
                self.process_SASsample(xmlnode, nxentry)
            elif tag.endswith('}SASinstrument'):
                self.process_SASinstrument(xmlnode, nxentry)
            elif tag.endswith('}SASprocess'):
                pass  # handled above
            elif tag.endswith('}SASnote'):
                pass  # handled above
            elif tag.endswith('}SAStransmission_spectrum'):
                pass  # handled above
            else:
                self.process_unexpected_xml_element(xmlnode, nxentry)
    return nx_node_list
def process_SASprocess(self, xml_parent, nx_parent):
    '''
    process any SASprocess groups

    Each canSAS SASprocess becomes a NeXus NXprocess group.
    '''
    xml_node_list = xml_parent.findall('cs:SASprocess', self.ns)
    for i, xml_group in enumerate(xml_node_list):
        # default name; disambiguate with an index when several exist
        nm = 'sasprocess'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm = xml_group.attrib.get('name', nm)
        nm_clean = self.unique_name(nm, nx_parent)
        nxprocess = eznx.makeGroup(nx_parent, nm_clean, 'NXprocess',
                                   canSAS_class='SASprocess',
                                   canSAS_name=nm)
        term_counter = 0  # numbers the <term> children: term_0, term_1, ...
        for xmlnode in xml_group:
            if isinstance(xmlnode.tag, str):  # avoid XML Comments
                if xmlnode.tag.endswith('}name'):
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}date'):
                    # TODO: test for ISO-8601?
                    # need to convert from arbitrary representations
                    #  01-DEC-2008 04:30:25
                    #  1-Jul-1998 14:57:37
                    #  04-Sep-2007 18:12:27
                    #  Tue, May 20, 2008 1:39:23 PM
                    #  Tue, Aug 21, 2007
                    #  1999-01-04 20:15:45
                    #  1999-01-04T20:15:45
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}description'):
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}term'):
                    nm = 'term_' + str(term_counter)
                    term_counter += 1
                    ds = self.field_text(xmlnode, nxprocess, node_name=nm)
                    self.copy_attributes(xmlnode, ds)
                    # canSAS uses singular "unit"; NeXus expects "units"
                    units = xmlnode.attrib.get('unit')
                    if units is not None:
                        eznx.addAttributes(ds, units=units)
                        del ds.attrs['unit']  # remove the canSAS singular name
                elif xmlnode.tag.endswith('}SASprocessnote'):
                    pass  # handled below
                else:
                    self.process_unexpected_xml_element(xmlnode, nxprocess)
        self.process_SASprocessnote(xml_group, nxprocess)
def process_SAStransmission_spectrum(self, xml_parent, nx_parent):
    '''
    process any SAStransmission_spectrum groups

    These are handled similar to SASdata but with different nouns

    Shouldn't this be located (in NeXus) at /NXentry/NXsample/transmission?

    Returns the list of NXdata groups created.
    '''
    nx_node_list = []
    xml_node_list = xml_parent.findall('cs:SAStransmission_spectrum', self.ns)
    for i, sas_ts in enumerate(xml_node_list):
        nm = 'transmission_spectrum'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm_clean = self.unique_name(nm, nx_parent)
        nxdata = eznx.makeGroup(
            nx_parent,
            nm_clean,
            'NXdata',
            canSAS_class='SAStransmission_spectrum',
        )
        nm = sas_ts.attrib.get('name')
        if nm is not None:
            eznx.addAttributes(nxdata, name=nm)
        nx_node_list.append(nxdata)

        # collect the data arrays, one column per tag found in <Tdata>
        data = {}
        units = {}
        for xmlnode in sas_ts:
            if isinstance(xmlnode.tag, str):  # avoid XML Comments
                if str(xmlnode.tag).endswith('}Tdata'):
                    for xmldata in xmlnode:
                        try:
                            tag = ns_strip(xmldata)
                        except AttributeError as _exc:
                            continue  # an XML comment triggered this
                        if tag not in data:
                            data[tag] = []
                            units[tag] = xmldata.get('unit', 'none')
                        data[tag].append(xmldata.text)
                else:
                    self.process_unexpected_xml_element(xmlnode, nxdata)

        # write the data arrays
        nx_obj = {}
        for nm, arr in data.items():
            try:
                nx_obj[nm] = eznx.makeDataset(nxdata, nm, map(float, arr),
                                              units=units[nm])
            except TypeError as _exc:
                pass  # non-numeric column: no dataset written

        # set the NeXus plottable data attributes
        if 'T' in data:
            eznx.addAttributes(nxdata, signal='T')
        if 'Lambda' in data:
            eznx.addAttributes(nxdata, axes='Lambda')  # NeXus
        if 'Tdev' in data and 'T' in nx_obj:
            # bug fix: guard on nx_obj -- the 'T' dataset may be absent
            # when makeDataset raised TypeError above
            eznx.addAttributes(nx_obj['T'], uncertainties='Tdev')  # NeXus
    return nx_node_list
def process_SASprocess(self, xml_parent, nx_parent):
    '''
    process any SASprocess groups

    Maps each canSAS SASprocess onto a NeXus NXprocess group.
    '''
    xml_node_list = xml_parent.findall('cs:SASprocess', self.ns)
    for i, xml_group in enumerate(xml_node_list):
        # default name, indexed when more than one group is present
        nm = 'sasprocess'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm = xml_group.attrib.get('name', nm)
        nm_clean = self.unique_name(nm, nx_parent)
        nxprocess = eznx.makeGroup(nx_parent, nm_clean, 'NXprocess',
                                   canSAS_class='SASprocess',
                                   canSAS_name=nm)
        term_counter = 0  # sequential naming for <term> children
        for xmlnode in xml_group:
            if isinstance(xmlnode.tag, str):  # avoid XML Comments
                if xmlnode.tag.endswith('}name'):
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}date'):
                    # TODO: test for ISO-8601?
                    # need to convert from arbitrary representations
                    #  01-DEC-2008 04:30:25
                    #  1-Jul-1998 14:57:37
                    #  04-Sep-2007 18:12:27
                    #  Tue, May 20, 2008 1:39:23 PM
                    #  Tue, Aug 21, 2007
                    #  1999-01-04 20:15:45
                    #  1999-01-04T20:15:45
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}description'):
                    self.field_text(xmlnode, nxprocess)
                elif xmlnode.tag.endswith('}term'):
                    nm = 'term_'+str(term_counter)
                    term_counter += 1
                    ds = self.field_text(xmlnode, nxprocess, node_name=nm)
                    self.copy_attributes(xmlnode, ds)
                    # rename canSAS "unit" attribute to NeXus "units"
                    units = xmlnode.attrib.get('unit')
                    if units is not None:
                        eznx.addAttributes(ds, units=units)
                        del ds.attrs['unit']  # remove the canSAS singular name
                elif xmlnode.tag.endswith('}SASprocessnote'):
                    pass  # handled below
                else:
                    self.process_unexpected_xml_element(xmlnode, nxprocess)
        self.process_SASprocessnote(xml_group, nxprocess)
def process_SASdetector(self, xml_parent, nx_parent):
    '''
    process any SASdetector groups, each becoming a NeXus NXdetector
    '''
    for sas_group in xml_parent.findall('cs:SASdetector', self.ns):
        if not isinstance(sas_group.tag, str):
            continue  # avoid XML Comments
        nm = sas_group.attrib.get('name', 'sasdetector')
        nxdetector = eznx.makeGroup(
            nx_parent,
            self.unique_name(nm, nx_parent),
            'NXdetector',
            canSAS_class='SASdetector',
            canSAS_name=nm)
        for xmlnode in sas_group:
            if not isinstance(xmlnode.tag, str):
                continue  # avoid XML Comments
            tag = xmlnode.tag
            if tag.endswith('}name'):
                eznx.makeDataset(nxdetector, 'name', (xmlnode.text or '').strip())
            elif tag.endswith('}SDD'):
                ds = self.field_float(xmlnode, nxdetector)
                eznx.addAttributes(ds, comment='Distance between sample and detector')
            elif tag.endswith('}offset'):
                self.axis_values(xmlnode, nxdetector, '%s_position')
            elif tag.endswith('}orientation'):
                self.axis_values(xmlnode, nxdetector)
            elif tag.endswith('}beam_center'):
                self.axis_values(xmlnode, nxdetector, 'beam_center_%s')
            elif tag.endswith('}pixel_size'):
                self.axis_values(xmlnode, nxdetector, '%s_pixel_size')
            elif tag.endswith('}slit_length'):
                ds = self.field_float(xmlnode, nxdetector)
                eznx.addAttributes(
                    ds,
                    comment='Slit length of the instrument for this detector, '
                            'expressed in the same units as Q')
            else:
                self.process_unexpected_xml_element(xmlnode, nxdetector)
def saveFile(self): '''write all desired data to the file and exit this code''' t = datetime.datetime.now() #timestamp = ' '.join((t.strftime("%Y-%m-%d"), t.strftime("%H:%M:%S"))) timestamp = str(t).split('.')[0] f = group_registry['/'].hdf5_group eznx.addAttributes(f, timestamp=timestamp) # TODO: will len(caget(array)) = NORD or NELM? (useful data or full array) for pv_spec in pv_registry.values(): if not pv_spec.acquire_after_scan: continue if pv_spec.as_string: value = pv_spec.pv.get(as_string=True) else: value = pv_spec.pv.get() if value is [None]: value = 'no data' if not isinstance(value, numpy.ndarray): value = [value] else: if pv_spec.length_limit and pv_spec.length_limit in pv_registry: length_limit = pv_registry[pv_spec.length_limit].pv.get() if len(value) > length_limit: value = value[:length_limit] hdf5_parent = pv_spec.group_parent.hdf5_group try: ds = eznx.makeDataset(hdf5_parent, pv_spec.label, value) self._attachEpicsAttributes(ds, pv_spec.pv) eznx.addAttributes(ds, **pv_spec.attrib) except Exception as e: print "ERROR: ", pv_spec.label, value print "MESSAGE: ", e print "RESOLUTION: writing as error message string" eznx.makeDataset(hdf5_parent, pv_spec.label, [str(e)]) #raise # as the final step, make all the links as directed for _k, v in link_registry.items(): v.make_link(f) f.close() # be CERTAIN to close the file
def process_SASdetector(self, xml_parent, nx_parent):
    '''process any SASdetector groups'''
    # elements whose children are x/y/z axis values, with the name
    # template passed along to axis_values()
    axis_map = {
        '}offset': '%s_position',
        '}beam_center': 'beam_center_%s',
        '}pixel_size': '%s_pixel_size',
    }
    detectors = xml_parent.findall('cs:SASdetector', self.ns)
    for group in detectors:
        if not isinstance(group.tag, str):
            continue  # avoid XML Comments
        det_name = group.attrib.get('name', 'sasdetector')
        nxdetector = eznx.makeGroup(nx_parent,
                                    self.unique_name(det_name, nx_parent),
                                    'NXdetector',
                                    canSAS_class='SASdetector',
                                    canSAS_name=det_name)
        for node in group:
            if not isinstance(node.tag, str):
                continue  # avoid XML Comments
            tag = node.tag
            matched = None
            for suffix in axis_map:
                if tag.endswith(suffix):
                    matched = suffix
                    break
            if matched is not None:
                self.axis_values(node, nxdetector, axis_map[matched])
            elif tag.endswith('}name'):
                eznx.makeDataset(nxdetector, 'name', (node.text or '').strip())
            elif tag.endswith('}orientation'):
                self.axis_values(node, nxdetector)
            elif tag.endswith('}SDD'):
                ds = self.field_float(node, nxdetector)
                eznx.addAttributes(ds, comment='Distance between sample and detector')
            elif tag.endswith('}slit_length'):
                ds = self.field_float(node, nxdetector)
                msg = 'Slit length of the instrument for this detector, '
                msg += 'expressed in the same units as Q'
                eznx.addAttributes(ds, comment=msg)
            else:
                self.process_unexpected_xml_element(node, nxdetector)
def process_SASdata(self, xml_parent, nx_parent):
    '''
    process any SASdata groups

    Each canSAS SASdata becomes a NeXus NXdata group.
    Returns the list of NXdata groups created.
    '''
    nx_node_list = []
    xml_node_list = xml_parent.findall('cs:SASdata', self.ns)
    for i, sasdata in enumerate(xml_node_list):
        nm = 'sasdata'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm = sasdata.attrib.get('name', nm)
        nm_clean = self.unique_name(nm, nx_parent)
        nxdata = eznx.makeGroup(nx_parent, nm_clean, 'NXdata',
                                canSAS_class='SASdata',
                                canSAS_name=nm)
        nx_node_list.append(nxdata)

        # collect the SAS data arrays, one column per tag found in <Idata>
        data = {}
        units = {}
        for xmlnode in sasdata:
            if isinstance(xmlnode.tag, str):  # avoid XML Comments
                if str(xmlnode.tag).endswith('}Idata'):
                    for xmldata in xmlnode:
                        if isinstance(xmldata.tag, str):
                            tag = ns_strip(xmldata)
                            if tag not in data:
                                data[tag] = []
                                units[tag] = xmldata.get('unit', 'none')
                            data[tag].append(xmldata.text)
                else:
                    self.process_unexpected_xml_element(xmlnode, nxdata)

        # write the data arrays
        nx_obj = {}
        for nm, arr in data.items():
            try:
                nx_obj[nm] = eznx.makeDataset(nxdata, nm, map(float, arr),
                                              units=units[nm])
            except TypeError as _exc:
                pass  # non-numeric column: no dataset written

        # set the NeXus plottable data attributes
        # bug fix: the nx_obj[...] accesses below are now guarded -- a
        # dataset may be absent from nx_obj when makeDataset raised
        # TypeError above, which previously caused a KeyError
        if 'I' in data:
            eznx.addAttributes(nxdata, signal='I')
        if 'Q' in data:
            eznx.addAttributes(nxdata, axes='Q')  # NeXus
        if 'Idev' in data and 'I' in nx_obj:
            eznx.addAttributes(nx_obj['I'], uncertainties='Idev')  # NeXus
        if 'Qdev' in data and 'Q' in nx_obj:
            eznx.addAttributes(nx_obj['Q'], resolutions='Qdev')  # NeXus
        if 'dQw' in data and 'dQl' in data:
            # not a common occurrence
            # consider: Qdev or dQw & dQl
            # http://cansas-org.github.io/canSAS2012/notation.html?highlight=uncertainty
            if 'Qdev' not in data and 'Q' in nx_obj:
                # canSAS1d rules say either Qdev OR (dQw, dQl), not both
                eznx.addAttributes(nx_obj['Q'], resolutions=['dQw', 'dQl'])
    return nx_node_list
def process_SAStransmission_spectrum(self, xml_parent, nx_parent):
    '''
    process any SAStransmission_spectrum groups

    These are handled similar to SASdata but with different nouns

    Shouldn't this be located (in NeXus) at /NXentry/NXsample/transmission?

    Returns the list of NXdata groups created.
    '''
    nx_node_list = []
    xml_node_list = xml_parent.findall('cs:SAStransmission_spectrum', self.ns)
    for i, sas_ts in enumerate(xml_node_list):
        nm = 'transmission_spectrum'
        if len(xml_node_list) > 1:
            nm += '_' + str(i)
        nm_clean = self.unique_name(nm, nx_parent)
        nxdata = eznx.makeGroup(
            nx_parent,
            nm_clean,
            'NXdata',
            canSAS_class='SAStransmission_spectrum',
        )
        nm = sas_ts.attrib.get('name')
        if nm is not None:
            eznx.addAttributes(nxdata, name=nm)
        nx_node_list.append(nxdata)

        # collect the data arrays, one column per tag found in <Tdata>
        data = {}
        units = {}
        for xmlnode in sas_ts:
            if isinstance(xmlnode.tag, str):  # avoid XML Comments
                if str(xmlnode.tag).endswith('}Tdata'):
                    for xmldata in xmlnode:
                        try:
                            tag = ns_strip(xmldata)
                        except AttributeError as _exc:
                            continue  # an XML comment triggered this
                        if tag not in data:
                            data[tag] = []
                            units[tag] = xmldata.get('unit', 'none')
                        data[tag].append(xmldata.text)
                else:
                    self.process_unexpected_xml_element(xmlnode, nxdata)

        # write the data arrays
        nx_obj = {}
        for nm, arr in data.items():
            try:
                nx_obj[nm] = eznx.makeDataset(nxdata, nm, map(float, arr),
                                              units=units[nm])
            except TypeError as _exc:
                pass  # non-numeric column: no dataset written

        # set the NeXus plottable data attributes
        if 'T' in data:
            eznx.addAttributes(nxdata, signal='T')
        if 'Lambda' in data:
            eznx.addAttributes(nxdata, axes='Lambda')  # NeXus
        if 'Tdev' in data and 'T' in nx_obj:
            # bug fix: guard on nx_obj -- the 'T' dataset may be absent
            # when makeDataset raised TypeError above
            eznx.addAttributes(nx_obj['T'], uncertainties='Tdev')  # NeXus
    return nx_node_list
return for item in node._seq: nested(item) def convert(xmlFile, hdf5File): '''read the canSAS1D XML file and write a NXcanSAS HDF5 file''' try: sasxml = ingest(xmlFile) except cansas1d.Exception_canSAS_namespace, answer: raise ValueError("wrong XML namespace:" + answer) except cansas1d.Exception_canSAS_version, answer: raise ValueError("wrong version string:" + answer) nx_root = eznx.makeFile(hdf5File) eznx.addAttributes(nx_root, creator='canSAS1d_to_NXcanSAS.py') nested(sasxml) if __name__ == "__main__": if len(sys.argv) == 2: demo(sys.argv[1]) else: filelist = ''' bimodal-test1.xml s81-polyurea.xml '''.strip().split() for fname in filelist: xmlFile = os.path.join('..', 'xml', 'bimodal-test1.xml') # demo(xmlFile) hdf5File = os.path.join('..', os.path.splitext(fname)[0] + '.h5')
def save(self, hfile=None, key=None):
    '''
    save the reduced data group to an HDF5 file, return filename or None if not written

    :param str hfile: output HDF5 file name (default: input HDF5 file)
    :param str key: name of reduced data set (default: nothing will be saved)

    By default, save to the input HDF5 file.
    To override this, specify the output HDF5 file name when calling this method.

    * If the file exists, this will not overwrite any input data.
    * Full, reduced :math:`R(Q)` goes into NXdata group::

        /entry/flyScan_reduced_full

    * any previous full reduced :math:`R(Q)` will be replaced.
    * It may replace the rebinned, reduced :math:`R(Q)`
      if a NXdata group of the same number of bins exists.
    * Rebinned, reduced :math:`R(Q)` goes into NXdata group::

        /entry/flyScan_reduced_<N>

      where ``<N>`` is the number of bins, such as (for 500 bins)::

        /entry/flyScan_reduced_500

    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXentry.html
    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXdata.html
    '''
    # TODO: save with NXprocess/NXdata structure
    # TODO: link that NXdata up to NXentry level
    # TODO: change /NXentry@default to point to best NXdata reduced
    # TODO: What about NXcanSAS?
    key = str(key)
    if key not in self.reduced:
        return
    nxname = 'flyScan_reduced_' + key
    hfile = hfile or self.hdf5_file_name
    ds = self.reduced[key]
    with h5py.File(hfile, 'a') as hdf:
        if 'default' not in hdf.attrs:
            hdf.attrs['default'] = 'entry'
        nxentry = eznx.openGroup(hdf, 'entry', 'NXentry')
        if 'default' not in nxentry.attrs:
            nxentry.attrs['default'] = nxname
        nxdata = eznx.openGroup(
            nxentry,
            nxname,
            'NXdata',
            signal='R',
            axes='Q',
            Q_indices=0,
            timestamp=calc.iso8601_datetime(),
        )
        # bug fix: the loop variable previously shadowed the ``key``
        # parameter; renamed for clarity and safety
        for field_name in sorted(ds.keys()):
            try:
                _ds = eznx.write_dataset(nxdata, field_name, ds[field_name])
                if field_name in self.units:
                    eznx.addAttributes(_ds, units=self.units[field_name])
            except RuntimeError:
                pass  # TODO: reporting
    return hfile
# create file and group structure root = eznx.makeFile(TARGET_FILE, default='entry') nxentry = eznx.makeGroup(root, 'entry', 'NXentry', default='data') nxdata = eznx.makeGroup(nxentry, 'data', 'NXdata', signal='frames', ) nxinstrument = eznx.makeGroup(nxentry, 'instrument', 'NXinstrument') nxdetector = eznx.makeGroup(nxinstrument, 'detector', 'NXdetector') nxsource = eznx.makeGroup(nxinstrument, 'source', 'NXsource') nxmonochromator = eznx.makeGroup(nxinstrument, 'monochromator', 'NXmonochromator') nxcollimator = eznx.makeGroup(nxinstrument, 'collimator', 'NXcollimator') nxgeometry_slit = eznx.makeGroup(nxcollimator, 'geometry', 'NXgeometry') nxshape_slit = eznx.makeGroup(nxgeometry_slit, 'shape', 'NXshape') nxsample = eznx.makeGroup(nxinstrument, 'sample', 'NXsample') nxmonitor = eznx.makeGroup(nxinstrument, 'control', 'NXmonitor') # various metadata eznx.addAttributes(root, creator=h5.attrs['creator'] + ' and spec2nexus.eznx') eznx.write_dataset(nxentry, 'title', 'NeXus NXsas example') eznx.write_dataset(nxentry, 'definition', 'NXsas', URL='http://download.nexusformat.org/doc/html/classes/applications/NXsas.html') eznx.write_dataset(nxentry, 'start_time', h5_files[0].attrs['file_time']) eznx.write_dataset(nxentry, 'end_time', h5_files[-1].attrs['file_time']) eznx.write_dataset(nxdetector, 'frame_files', '\n'.join(names)) eznx.write_dataset(nxinstrument, 'name', 'APS 9-ID-C USAXS pinSAXS') eznx.write_dataset(nxsource, 'type', 'Synchrotron X-ray Source') eznx.write_dataset(nxsource, 'name', 'Advanced Photon Source Undulator A, sector 9ID-C') eznx.write_dataset(nxsource, 'probe', 'x-ray') eznx.write_dataset(nxsource, 'current', h5['/entry/EPICS_PV_metadata/SRcurrent'], units='mA') eznx.write_dataset(nxsource, 'energy', float(7), units='GeV') eznx.write_dataset(nxmonochromator, 'energy', h5['/entry/instrument/monochromator/energy'], units='keV') eznx.write_dataset(nxmonochromator, 'wavelength', h5['/entry/EPICS_PV_metadata/wavelength'], units='Angstroms') 
# wavelength spread is a dimensionless ratio (Angstroms/Angstroms)
eznx.write_dataset(nxmonochromator, 'wavelength_spread', h5['/entry/EPICS_PV_metadata/wavelength_spread'], units='Angstroms/Angstroms')
# slit geometry, per the NeXus NXshape base class
eznx.write_dataset(nxshape_slit, 'shape', 'nxbox')
def save(self, hfile=None, key=None):
    '''
    save the reduced data group to an HDF5 file, return filename or None if not written

    :param str hfile: output HDF5 file name (default: input HDF5 file)
    :param str key: name of reduced data set (default: nothing will be saved)

    By default, save to the input HDF5 file.
    To override this, specify the output HDF5 file name when calling this method.

    * If the file exists, this will not overwrite any input data.
    * Full, reduced :math:`R(Q)` goes into NXdata group::

        /entry/areaDetector_reduced_full

    * any previous full reduced :math:`R(Q)` will be replaced.
    * It may replace the rebinned, reduced :math:`R(Q)`
      if a NXdata group of the same number of bins exists.
    * Rebinned, reduced :math:`R(Q)` goes into NXdata group::

        /entry/areaDetector_reduced_<N>

      where ``<N>`` is the number of bins, such as (for 500 bins)::

        /entry/areaDetector_reduced_500

    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXentry.html
    :see: http://download.nexusformat.org/doc/html/classes/base_classes/NXdata.html
    '''
    key = str(key)
    if key not in self.reduced:
        return
    nxname = 'areaDetector_reduced_' + key
    hfile = hfile or self.hdf5_file_name
    ds = self.reduced[key]
    try:
        hdf = h5py.File(hfile, 'a')
    except IOError as _exc:
        # FIXME: some h5py problem in <h5py>/_hl/files.py, line 101
        # this fails: fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl)
        # with IOError that is improperly caught on next and then:
        # fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl) fails with IOError
        # since the second call has "name" with all lower case
        #
        # real problem is that these HDF5 files have the wrong uid/gid, as set by the Pilatus computer
        # TODO: fix each Pilatus and this problem will go away
        # TODO: change uid/gid on all the acquired HDF5 files (*.h5, *.hdf) under usaxscontrol:/share1/USAXS_data/2*
        # Files should be owned by usaxs:usaxs (1810:2026), but are owned by tomo2:usaxs (500:2026) as seen by usaxs@usaxscontrol
        # not enough to change the "umask" on the det@dec1122 computer, what else will fix this?
        pvwatch.logMessage( "Problem writing reduced data back to file: " + hfile )
        return
    try:
        if 'default' not in hdf.attrs:
            hdf.attrs['default'] = 'entry'
        nxentry = eznx.openGroup(hdf, 'entry', 'NXentry')
        if 'default' not in nxentry.attrs:
            nxentry.attrs['default'] = nxname
        nxdata = eznx.openGroup(nxentry,
                                nxname,
                                'NXdata',
                                signal='R',
                                axes='Q',
                                Q_indices=0,
                                timestamp=calc.iso8601_datetime(),
                                )
        # bug fix: the loop variable previously shadowed the ``key`` parameter
        for field_name in sorted(ds.keys()):
            try:
                _ds = eznx.write_dataset(nxdata, field_name, ds[field_name])
                if field_name in self.units:
                    eznx.addAttributes(_ds, units=self.units[field_name])
            except RuntimeError:
                pass  # TODO: reporting
    finally:
        # bug fix: previously hdf.close() was skipped if any of the
        # writes above raised, leaking the open file handle
        hdf.close()
    return hfile
'NXdata', signal='frames', ) nxinstrument = eznx.makeGroup(nxentry, 'instrument', 'NXinstrument') nxdetector = eznx.makeGroup(nxinstrument, 'detector', 'NXdetector') nxsource = eznx.makeGroup(nxinstrument, 'source', 'NXsource') nxmonochromator = eznx.makeGroup(nxinstrument, 'monochromator', 'NXmonochromator') nxcollimator = eznx.makeGroup(nxinstrument, 'collimator', 'NXcollimator') nxgeometry_slit = eznx.makeGroup(nxcollimator, 'geometry', 'NXgeometry') nxshape_slit = eznx.makeGroup(nxgeometry_slit, 'shape', 'NXshape') nxsample = eznx.makeGroup(nxinstrument, 'sample', 'NXsample') nxmonitor = eznx.makeGroup(nxinstrument, 'control', 'NXmonitor') # various metadata eznx.addAttributes(root, creator=h5.attrs['creator'] + ' and spec2nexus.eznx') eznx.write_dataset(nxentry, 'title', 'NeXus NXsas example') eznx.write_dataset( nxentry, 'definition', 'NXsas', URL= 'http://download.nexusformat.org/doc/html/classes/applications/NXsas.html') eznx.write_dataset(nxentry, 'start_time', h5_files[0].attrs['file_time']) eznx.write_dataset(nxentry, 'end_time', h5_files[-1].attrs['file_time']) eznx.write_dataset(nxdetector, 'frame_files', '\n'.join(names)) eznx.write_dataset(nxinstrument, 'name', 'APS 9-ID-C USAXS pinSAXS') eznx.write_dataset(nxsource, 'type', 'Synchrotron X-ray Source') eznx.write_dataset(nxsource, 'name', 'Advanced Photon Source Undulator A, sector 9ID-C') eznx.write_dataset(nxsource, 'probe', 'x-ray')