def sum_all_spectra(obj, **kwargs):
    """
    This function takes all the spectra in the given object and sums them
    together. All of the spectra are assumed to have the same axis scale.

    @param obj: Object in which all of the spectra are to be summed together
    @type obj: C{SOM.SOM}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword rebin_axis: The axis(es) to rebin the spectra onto.
    @type rebin_axis: C{nessi_list.NessiList} or C{list} of
                      C{nessi_list.NessiList}s

    @keyword rebin_axis_dim: The dimension on the spectra being rebinned.
                             The default value is I{1}.
    @type rebin_axis_dim: C{int}

    @keyword y_sort: A flag that will sort the spectrum IDs by y. The default
                     behavior is I{False} and this maintains the original x
                     ordering.
    @type y_sort: C{boolean}

    @keyword stripe: A flag that will combine spectra in either the x or y
                     direction at a given y or x. The integration direction
                     is based on the setting of y_sort. The default behavior
                     is I{False} and corresponds to summing all spectra into
                     one.
    @type stripe: C{boolean}

    @keyword pixel_fix: A single or list of pixel IDs with which to override
                        the summed spectrum pixel ID. The setting of y_sort
                        determines if the x component (y_sort=False) or the
                        y component (y_sort=True) of the pixel ID is
                        overridden. (The keyword was historically documented
                        as I{pix_fix}; both spellings are accepted.)
    @type pixel_fix: C{int} or C{list} of C{int}s

    @return: Object containing a single spectrum
    @rtype: C{SOM.SOM}

    @raise TypeError: Anything other than a C{SOM} is given
    @raise RuntimeError: An unknown rebinning dimension is given
    """
    o_descr = hlr_utils.get_descr(obj)

    if o_descr != "SOM":
        raise TypeError("Function argument must be a SOM")

    # If there is only one SO, there is nothing to sum.
    if len(obj) == 1:
        return obj

    rebin_axis = kwargs.get("rebin_axis")
    rebin_axis_dim = kwargs.get("rebin_axis_dim", 1)
    y_sort = kwargs.get("y_sort", False)
    stripe = kwargs.get("stripe", False)
    # The docstring used to advertise "pix_fix" while the code only read
    # "pixel_fix"; accept both spellings, preferring "pixel_fix".
    pix_fix = kwargs.get("pixel_fix", kwargs.get("pix_fix"))

    import common_lib

    # Optionally rebin all spectra onto a common axis before summing.
    if rebin_axis is not None:
        if rebin_axis_dim == 1:
            obj1 = common_lib.rebin_axis_1D(obj, rebin_axis)
        elif rebin_axis_dim == 2:
            obj1 = common_lib.rebin_axis_2D(obj, rebin_axis[0], rebin_axis[1])
        else:
            raise RuntimeError("Do not have rebinning method for %dD."
                               % rebin_axis_dim)
    else:
        obj1 = obj

    del obj

    # Sort SO IDs by y value; otherwise the SO IDs keep their original
    # x ordering.
    if y_sort:
        obj1.sort(lambda x, y: cmp(x.id[1][1], y.id[1][1]))

    (result, res_descr) = hlr_utils.empty_result(obj1)
    result = hlr_utils.copy_som_attr(result, res_descr, obj1, o_descr)

    if not stripe:
        # Sum every spectrum in the object into a single spectrum.
        so_id_list = []

        val1 = hlr_utils.get_value(obj1, 0, o_descr, "all")
        val2 = hlr_utils.get_value(obj1, 1, o_descr, "all")
        value = common_lib.add_ncerr(val1, val2)
        so_id_list.append(val1.id)
        so_id_list.append(val2.id)

        for i in xrange(2, hlr_utils.get_length(obj1)):
            val = hlr_utils.get_value(obj1, i, o_descr, "all")
            value = common_lib.add_ncerr(val, value)
            so_id_list.append(val.id)

        hlr_utils.result_insert(result, res_descr, value, None, "all")

        result.attr_list["Summed IDs"] = so_id_list

        # Override the summed spectrum's pixel ID if requested.
        if pix_fix is not None:
            if y_sort:
                fixed_pixel = (so_id_list[0][0],
                               (pix_fix, so_id_list[0][1][1]))
            else:
                fixed_pixel = (so_id_list[0][0],
                               (so_id_list[0][1][0], pix_fix))
        else:
            fixed_pixel = so_id_list[0]

        result[0].id = fixed_pixel
    else:
        # Sum runs of spectra ("stripes") that share the same x (or y when
        # y_sort is set) ID component. Each stripe becomes one spectrum.
        so_id_list = []
        i_start = 0
        stripe_count = 0
        total_size = hlr_utils.get_length(obj1)

        while i_start < total_size:
            stripe_list = []
            counted = 2

            val1 = hlr_utils.get_value(obj1, i_start, o_descr, "all")
            val2 = hlr_utils.get_value(obj1, i_start + 1, o_descr, "all")
            value = common_lib.add_ncerr(val1, val2)
            stripe_list.append(val1.id)
            stripe_list.append(val2.id)

            # The component of the ID that defines the current stripe.
            if y_sort:
                comp_id = val2.id[1][1]
            else:
                comp_id = val2.id[1][0]

            for i in xrange(i_start + 2, total_size):
                val = hlr_utils.get_value(obj1, i, o_descr, "all")
                if y_sort:
                    new_id = val.id[1][1]
                else:
                    new_id = val.id[1][0]

                # A larger ID component means the next stripe has started.
                if new_id > comp_id:
                    break

                value = common_lib.add_ncerr(val, value)
                stripe_list.append(val.id)
                counted += 1

            i_start += counted
            so_id_list.append(stripe_list)
            hlr_utils.result_insert(result, res_descr, value, None, "all")

            # Override the stripe's pixel ID if requested. pix_fix may be a
            # list (one ID per stripe) or a scalar (TypeError on indexing).
            if pix_fix is not None:
                try:
                    if y_sort:
                        fixed_pixel = (stripe_list[0][0],
                                       (pix_fix[stripe_count],
                                        stripe_list[0][1][1]))
                    else:
                        fixed_pixel = (stripe_list[0][0],
                                       (stripe_list[0][1][0],
                                        pix_fix[stripe_count]))
                except TypeError:
                    if y_sort:
                        fixed_pixel = (stripe_list[0][0],
                                       (pix_fix, stripe_list[0][1][1]))
                    else:
                        fixed_pixel = (stripe_list[0][0],
                                       (stripe_list[0][1][0], pix_fix))
            else:
                fixed_pixel = stripe_list[0]

            result[stripe_count].id = fixed_pixel
            stripe_count += 1

        # NOTE(review): this branch uses the key "summed_ids" while the
        # non-stripe branch uses "Summed IDs" — left as-is since downstream
        # consumers may rely on either key; consider unifying.
        result.attr_list["summed_ids"] = so_id_list

    return result
# NOTE(review): this is a verbatim duplicate of the sum_all_spectra defined
# earlier in this module. Python binds the last definition, so this copy is
# the one actually in effect — consider removing one of the two.
def sum_all_spectra(obj, **kwargs):
    """
    This function takes all the spectra in the given object and sums them
    together. All of the spectra are assumed to have the same axis scale.

    @param obj: Object in which all of the spectra are to be summed together
    @type obj: C{SOM.SOM}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword rebin_axis: The axis(es) to rebin the spectra onto.
    @type rebin_axis: C{nessi_list.NessiList} or C{list} of
                      C{nessi_list.NessiList}s

    @keyword rebin_axis_dim: The dimension on the spectra being rebinned.
                             The default value is I{1}.
    @type rebin_axis_dim: C{int}

    @keyword y_sort: A flag that will sort the spectrum IDs by y. The default
                     behavior is I{False} and this maintains the original x
                     ordering.
    @type y_sort: C{boolean}

    @keyword stripe: A flag that will combine spectra in either the x or y
                     direction at a given y or x. The integration direction
                     is based on the setting of y_sort. The default behavior
                     is I{False} and corresponds to summing all spectra into
                     one.
    @type stripe: C{boolean}

    @keyword pixel_fix: A single or list of pixel IDs with which to override
                        the summed spectrum pixel ID. The setting of y_sort
                        determines if the x component (y_sort=False) or the
                        y component (y_sort=True) of the pixel ID is
                        overridden. NOTE(review): the code reads the kwarg
                        "pixel_fix", not "pix_fix" as previously documented.
    @type pixel_fix: C{int} or C{list} of C{int}s

    @return: Object containing a single spectrum
    @rtype: C{SOM.SOM}

    @raise TypeError: Anything other than a C{SOM} is given
    @raise RuntimeError: An unknown rebinning dimension is given
    """
    o_descr = hlr_utils.get_descr(obj)

    if o_descr != "SOM":
        raise TypeError("Function argument must be a SOM")
    # Have a SOM, go on
    else:
        pass

    # If there is only one SO, why run
    if len(obj) == 1:
        return obj
    # OK, we need to sum
    else:
        pass

    # Parse keyword arguments, falling back to documented defaults.
    try:
        rebin_axis = kwargs["rebin_axis"]
    except KeyError:
        rebin_axis = None

    try:
        rebin_axis_dim = kwargs["rebin_axis_dim"]
    except KeyError:
        rebin_axis_dim = 1

    try:
        y_sort = kwargs["y_sort"]
    except KeyError:
        y_sort = False

    try:
        stripe = kwargs["stripe"]
    except KeyError:
        stripe = False

    try:
        pix_fix = kwargs["pixel_fix"]
    except KeyError:
        pix_fix = None

    import common_lib

    # Optionally rebin all spectra onto a common axis before summing.
    if rebin_axis is not None:
        if rebin_axis_dim == 1:
            obj1 = common_lib.rebin_axis_1D(obj, rebin_axis)
        elif rebin_axis_dim == 2:
            obj1 = common_lib.rebin_axis_2D(obj, rebin_axis[0],
                                            rebin_axis[1])
        else:
            raise RuntimeError("Do not have rebinning method for %dD." \
                               % rebin_axis_dim)
    else:
        obj1 = obj

    del obj

    # Sort SO IDs by y value
    if y_sort:
        obj1.sort(lambda x, y: cmp(x.id[1][1], y.id[1][1]))
    # SO IDs are already sorted by x value
    else:
        pass

    (result, res_descr) = hlr_utils.empty_result(obj1)
    result = hlr_utils.copy_som_attr(result, res_descr, obj1, o_descr)

    if not stripe:
        # Sum every spectrum into a single spectrum.
        # iterate through the values
        so_id_list = []

        val1 = hlr_utils.get_value(obj1, 0, o_descr, "all")
        val2 = hlr_utils.get_value(obj1, 1, o_descr, "all")
        value = common_lib.add_ncerr(val1, val2)
        so_id_list.append(val1.id)
        so_id_list.append(val2.id)

        for i in xrange(2, hlr_utils.get_length(obj1)):
            val = hlr_utils.get_value(obj1, i, o_descr, "all")
            value = common_lib.add_ncerr(val, value)
            so_id_list.append(val.id)

        hlr_utils.result_insert(result, res_descr, value, None, "all")

        result.attr_list["Summed IDs"] = so_id_list

        # Override the summed spectrum's pixel ID if requested.
        if pix_fix is not None:
            if y_sort:
                fixed_pixel = (so_id_list[0][0],
                               (pix_fix, so_id_list[0][1][1]))
            else:
                fixed_pixel = (so_id_list[0][0],
                               (so_id_list[0][1][0], pix_fix))
        else:
            fixed_pixel = so_id_list[0]

        result[0].id = fixed_pixel
    else:
        # Sum runs of spectra ("stripes") that share the same x (or y when
        # y_sort is set) ID component; each stripe becomes one spectrum.
        # iterate through the values
        so_id_list = []
        i_start = 0
        stripe_count = 0
        total_size = hlr_utils.get_length(obj1)

        while i_start < total_size:
            stripe_list = []
            counted = 2

            val1 = hlr_utils.get_value(obj1, i_start, o_descr, "all")
            val2 = hlr_utils.get_value(obj1, i_start+1, o_descr, "all")
            value = common_lib.add_ncerr(val1, val2)
            stripe_list.append(val1.id)
            stripe_list.append(val2.id)

            # The ID component that defines the current stripe.
            if y_sort:
                comp_id = val2.id[1][1]
            else:
                comp_id = val2.id[1][0]

            for i in xrange(i_start+2, total_size):
                val = hlr_utils.get_value(obj1, i, o_descr, "all")
                if y_sort:
                    new_id = val.id[1][1]
                else:
                    new_id = val.id[1][0]

                # A larger ID component means the next stripe has started.
                if new_id > comp_id:
                    break

                value = common_lib.add_ncerr(val, value)
                stripe_list.append(val.id)
                counted += 1

            i_start += counted
            so_id_list.append(stripe_list)
            hlr_utils.result_insert(result, res_descr, value, None, "all")

            # pix_fix may be a list (one ID per stripe) or a scalar; the
            # scalar case raises TypeError on indexing and is handled below.
            if pix_fix is not None:
                try:
                    if y_sort:
                        fixed_pixel = (stripe_list[0][0],
                                       (pix_fix[stripe_count],
                                        stripe_list[0][1][1]))
                    else:
                        fixed_pixel = (stripe_list[0][0],
                                       (stripe_list[0][1][0],
                                        pix_fix[stripe_count]))
                except TypeError:
                    if y_sort:
                        fixed_pixel = (stripe_list[0][0],
                                       (pix_fix, stripe_list[0][1][1]))
                    else:
                        fixed_pixel = (stripe_list[0][0],
                                       (stripe_list[0][1][0], pix_fix))
            else:
                fixed_pixel = stripe_list[0]

            result[stripe_count].id = fixed_pixel
            stripe_count += 1

        # NOTE(review): key differs from the "Summed IDs" key used by the
        # non-stripe branch above.
        result.attr_list["summed_ids"] = so_id_list

    return result
def add_files_bg(filelist, **kwargs):
    """
    This function takes a list of U{NeXus<www.nexusformat.org>} files and
    various keyword arguments and returns a data C{SOM} and a background
    C{SOM} (if requested) that is the sum of all the data from the specified
    files. B{It is assumed that the files contain similar data as only crude
    cross-checks will be made. You have been warned.}

    @param filelist: A list containing the names of the files to sum
    @type filelist: C{list}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword SO_Axis: This is the name of the main axis to read from the
                      NeXus file
    @type SO_Axis: C{string}

    @keyword Data_Paths: This contains the data paths and signals for the
                         requested detector banks
    @type Data_Paths: C{tuple} of C{tuple}s

    @keyword Signal_ROI: This is the name of a file that contains a list of
                         pixel IDs that will be read from the data file and
                         stored as a signal C{SOM}
    @type Signal_ROI: C{string}

    @keyword Bkg_ROI: This is the name of a file that contains a list of
                      pixel IDs that will be read from the data file and
                      stored as a background C{SOM}
    @type Bkg_ROI: C{string}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword dst_type: The type of C{DST} to be created during file read-in.
                       The default value is I{application/x-NeXus}.
    @type dst_type: C{string}

    @keyword Verbose: This is a flag to turn on print statments. The default
                      is I{False}.
    @type Verbose: C{boolean}

    @keyword Timer: This is an SNS Timer object used for showing the
                    performance timing in the function.
    @type Timer: C{sns_timing.Timer}

    @return: Signal C{SOM.SOM} and background C{SOM.SOM}
    @rtype: C{tuple}

    @raise SystemExit: If any file cannot be read
    """
    import sys

    import common_lib
    import DST
    import hlr_utils

    # Parse keywords
    try:
        so_axis = kwargs["SO_Axis"]
    except KeyError:
        so_axis = "time_of_flight"

    try:
        data_paths = kwargs["Data_Paths"]
    except KeyError:
        data_paths = None

    try:
        signal_roi = kwargs["Signal_ROI"]
    except KeyError:
        signal_roi = None

    try:
        bkg_roi = kwargs["Bkg_ROI"]
    except KeyError:
        bkg_roi = None

    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        dst_type = kwargs["dst_type"]
    except KeyError:
        # No explicit DST type given: sniff the first file.
        try:
            dst_type = hlr_utils.file_peeker(filelist[0])
        except RuntimeError:
            # Assume it is a NeXus file, since it is not a DR produced file
            dst_type = "application/x-NeXus"

    try:
        verbose = kwargs["Verbose"]
    except KeyError:
        verbose = False

    try:
        timer = kwargs["Timer"]
    except KeyError:
        timer = None

    counter = 0

    # NOTE(review): an empty filelist leaves d_som1/b_som1 unbound and the
    # final return raises NameError — callers must pass at least one file.
    for filename in filelist:
        if verbose:
            print "File:", filename

        try:
            # NeXus DSTs take a filename; other DST types take an open
            # file object (released later via data_dst.release_resource()).
            if dst_type == "application/x-NeXus":
                data_dst = DST.getInstance(dst_type, filename)
            else:
                resource = open(filename, "r")
                data_dst = DST.getInstance(dst_type, resource)
        except SystemError:
            print "ERROR: Failed to data read file %s" % filename
            sys.exit(-1)

        if verbose:
            print "Reading data file %d" % counter

        if counter == 0:
            # First file: establish the signal (and optional background)
            # SOMs that subsequent files will be summed into.
            if dst_type == "application/x-NeXus":
                d_som1 = data_dst.getSOM(data_paths, so_axis,
                                         roi_file=signal_roi)
                d_som1.rekeyNxPars(dataset_type)
            else:
                if dst_type != "text/Dave2d":
                    d_som1 = data_dst.getSOM(data_paths,
                                             roi_file=signal_roi)
                else:
                    d_som1 = data_dst.getSOM(data_paths)

            if verbose:
                print "# Signal SO:", len(d_som1)
                if dst_type == "application/x-NeXus":
                    print "# TOF:", len(d_som1[0])
                    print "# TOF Axis:", len(d_som1[0].axis[0].val)
                elif dst_type != "text/num-info":
                    print "# Data Size:", len(d_som1[0])
                    print "# X-Axis:", len(d_som1[0].axis[0].val)
                    try:
                        axis_len = len(d_som1[0].axis[1].val)
                        print "# Y-Axis:", axis_len
                    except IndexError:
                        # 1D data: no second axis to report.
                        pass

            if bkg_roi is not None:
                if dst_type == "application/x-NeXus":
                    b_som1 = data_dst.getSOM(data_paths, so_axis,
                                             roi_file=bkg_roi)
                    b_som1.rekeyNxPars(dataset_type)
                else:
                    if dst_type != "text/Dave2d":
                        b_som1 = data_dst.getSOM(data_paths,
                                                 roi_file=bkg_roi)
                    else:
                        b_som1 = data_dst.getSOM(data_paths)
                if verbose:
                    print "# Background SO:", len(b_som1)
            else:
                b_som1 = None

            if timer is not None:
                timer.getTime(msg="After reading data")
        else:
            # Subsequent files: read into temporaries and accumulate.
            if dst_type == "application/x-NeXus":
                d_som_t = data_dst.getSOM(data_paths, so_axis,
                                          roi_file=signal_roi)
                d_som_t.rekeyNxPars(dataset_type)
                add_nxpars_sig = True
            else:
                if dst_type != "text/Dave2d":
                    d_som_t = data_dst.getSOM(data_paths,
                                              roi_file=signal_roi)
                else:
                    d_som_t = data_dst.getSOM(data_paths)
                add_nxpars_sig = False

            if bkg_roi is not None:
                if dst_type == "application/x-NeXus":
                    b_som_t = data_dst.getSOM(data_paths, so_axis,
                                              roi_file=bkg_roi)
                    b_som_t.rekeyNxPars(dataset_type)
                    add_nxpars_bkg = True
                else:
                    if dst_type != "text/Dave2d":
                        b_som_t = data_dst.getSOM(data_paths,
                                                  roi_file=bkg_roi)
                    else:
                        b_som_t = data_dst.getSOM(data_paths)
                    add_nxpars_bkg = False
            else:
                b_som_t = None

            if timer is not None:
                timer.getTime(msg="After reading data")

            d_som1 = common_lib.add_ncerr(d_som_t, d_som1,
                                          add_nxpars=add_nxpars_sig)
            if bkg_roi is not None:
                b_som1 = common_lib.add_ncerr(b_som_t, b_som1,
                                              add_nxpars=add_nxpars_bkg)

            if timer is not None:
                timer.getTime(msg="After adding spectra")

            del d_som_t
            if bkg_roi is not None:
                del b_som_t

            if timer is not None:
                timer.getTime(msg="After SOM deletion")

        data_dst.release_resource()
        del data_dst
        counter += 1

        if timer is not None:
            timer.getTime(msg="After resource release and DST deletion")

    # Record the contributing filenames on the SOMs (NeXus input only).
    if dst_type == "application/x-NeXus":
        som_key_parts = [dataset_type, "filename"]
        som_key = "-".join(som_key_parts)

        d_som1.attr_list[som_key] = filelist
        if b_som1 is not None:
            b_som1.attr_list[som_key] = filelist
    else:
        # Previously written files already have this structure imposed
        pass

    return (d_som1, b_som1)
def add_files_dm(filelist, **kwargs):
    """
    This function takes a list of U{NeXus<www.nexusformat.org>} files and
    various keyword arguments and returns a data C{SOM} and a monitor C{SOM}
    that is the sum of all the data from the specified files. B{It is assumed
    that the files contain similar data as only crude cross-checks will be
    made. You have been warned.}

    @param filelist: A list containing the names of the files to sum
    @type filelist: C{list}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword SO_Axis: This is the name of the main axis to read from the
                      NeXus file
    @type SO_Axis: C{string}

    @keyword Data_Paths: This contains the data paths and signals for the
                         requested detector banks
    @type Data_Paths: C{tuple} of C{tuple}s

    @keyword Mon_Paths: This contains the data paths and signals for the
                        requested monitor banks
    @type Mon_Paths: C{tuple} of C{tuple}s

    @keyword Signal_ROI: This is the name of a file that contains a list of
                         pixel IDs that will be read from the data file and
                         stored as a signal C{SOM}
    @type Signal_ROI: C{string}

    @keyword Signal_MASK: This is the name of a file that contains a list of
                          pixel IDs that will be read from the data file and
                          stored as a signal C{SOM}
    @type Signal_MASK: C{string}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword dataset_cwp: A set of chopper phase corrections for the dataset.
                          This will instruct the function to shift the TOF
                          axes of mulitple datasets and perform rebinning.
                          The TOF axis for the first dataset is the one that
                          all other datasets will be rebinned to.
    @type dataset_cwp: C{list} of C{float}s

    @keyword Verbose: This is a flag to turn on print statments. The default
                      is I{False}.
    @type Verbose: C{boolean}

    @keyword Timer: This is an SNS Timer object used for showing the
                    performance timing in the function.
    @type Timer: C{sns_timing.Timer}

    @return: Signal C{SOM.SOM} and monitor C{SOM.SOM}
    @rtype: C{tuple}

    @raise SystemExit: If any file cannot be read
    @raise RuntimeError: If both a ROI and MASK file are specified
    """
    import sys

    import common_lib
    import DST

    # Parse keywords
    try:
        so_axis = kwargs["SO_Axis"]
    except KeyError:
        so_axis = "time_of_flight"

    try:
        data_paths = kwargs["Data_Paths"]
    except KeyError:
        data_paths = None

    try:
        mon_paths = kwargs["Mon_Paths"]
    except KeyError:
        mon_paths = None

    try:
        signal_roi = kwargs["Signal_ROI"]
    except KeyError:
        signal_roi = None

    try:
        signal_mask = kwargs["Signal_MASK"]
    except KeyError:
        signal_mask = None

    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        verbose = kwargs["Verbose"]
    except KeyError:
        verbose = False

    try:
        timer = kwargs["Timer"]
    except KeyError:
        timer = None

    # None when no chopper phase corrections are supplied.
    dataset_cwp = kwargs.get("dataset_cwp")

    if signal_roi is not None and signal_mask is not None:
        raise RuntimeError("Cannot specify both ROI and MASK file! Please "\
                           +"choose!")

    # This function only handles NeXus input.
    dst_type = "application/x-NeXus"

    counter = 0

    # NOTE(review): an empty filelist leaves d_som1/m_som1 unbound and the
    # attribute tagging below raises NameError — callers must pass at least
    # one file.
    for filename in filelist:
        if verbose:
            print "File:", filename
            if dataset_cwp is not None:
                print "TOF Offset:", dataset_cwp[counter]

        # Per-file chopper phase (TOF) offset, if corrections were given.
        if dataset_cwp is not None:
            cwp = dataset_cwp[counter]
        else:
            cwp = None

        try:
            data_dst = DST.getInstance(dst_type, filename)
        except SystemError:
            print "ERROR: Failed to data read file %s" % filename
            sys.exit(-1)

        if timer is not None:
            timer.getTime(msg="After parsing file")

        if verbose:
            print "Reading data file %d" % counter

        if counter == 0:
            # First file: establish the signal (and optional monitor) SOMs
            # that subsequent files will be summed into.
            d_som1 = data_dst.getSOM(data_paths, so_axis,
                                     roi_file=signal_roi,
                                     mask_file=signal_mask,
                                     tof_offset=cwp)
            d_som1.rekeyNxPars(dataset_type)

            if verbose:
                print "# Signal SO:", len(d_som1)
                try:
                    print "# TOF:", len(d_som1[0])
                    print "# TOF Axis:", len(d_som1[0].axis[0].val)
                except IndexError:
                    # No data is present so say so again
                    print "information is unavailable since no data "\
                          +"present. Exiting."
                    sys.exit(0)

            if timer is not None:
                timer.getTime(msg="After reading data")

            if mon_paths is not None:
                if verbose:
                    print "Reading monitor %d" % counter

                # NOTE(review): always true inside this branch — the outer
                # test already established counter == 0.
                if counter == 0:
                    m_som1 = data_dst.getSOM(mon_paths, so_axis,
                                             tof_offset=cwp)
                    m_som1.rekeyNxPars(dataset_type)

                    if verbose:
                        print "# Monitor SO:", len(m_som1)
                        print "# TOF:", len(m_som1[0])
                        print "# TOF Axis:", len(m_som1[0].axis[0].val)

                    if timer is not None:
                        timer.getTime(msg="After reading monitor data")
            else:
                m_som1 = None
        else:
            # Subsequent files: read into temporaries, rebin to the first
            # file's TOF axis when phase corrections are active, and
            # accumulate.
            d_som_t0 = data_dst.getSOM(data_paths, so_axis,
                                       roi_file=signal_roi,
                                       mask_file=signal_mask,
                                       tof_offset=cwp)
            d_som_t0.rekeyNxPars(dataset_type)

            if timer is not None:
                timer.getTime(msg="After reading data")

            if dataset_cwp is not None:
                d_som_t = common_lib.rebin_axis_1D_frac(
                    d_som_t0, d_som1[0].axis[0].val)
                del d_som_t0
            else:
                d_som_t = d_som_t0

            d_som1 = common_lib.add_ncerr(d_som_t, d_som1, add_nxpars=True)

            if timer is not None:
                timer.getTime(msg="After adding data spectra")

            del d_som_t

            if timer is not None:
                timer.getTime(msg="After data SOM deletion")

            if mon_paths is not None:
                m_som_t0 = data_dst.getSOM(mon_paths, so_axis,
                                           tof_offset=cwp)
                m_som_t0.rekeyNxPars(dataset_type)

                if timer is not None:
                    timer.getTime(msg="After reading monitor data")

                if dataset_cwp is not None:
                    m_som_t = common_lib.rebin_axis_1D_frac(
                        m_som_t0, m_som1[0].axis[0].val)
                    del m_som_t0
                else:
                    m_som_t = m_som_t0

                m_som1 = common_lib.add_ncerr(m_som_t, m_som1,
                                              add_nxpars=True)

                if timer is not None:
                    timer.getTime(msg="After adding monitor spectra")

                del m_som_t

                if timer is not None:
                    timer.getTime(msg="After monitor SOM deletion")

        data_dst.release_resource()
        del data_dst
        counter += 1

        if timer is not None:
            timer.getTime(msg="After resource release and DST deletion")

    # Record the contributing filenames on the SOMs.
    som_key_parts = [dataset_type, "filename"]
    som_key = "-".join(som_key_parts)

    d_som1.attr_list[som_key] = filelist
    if m_som1 is not None:
        m_som1.attr_list[som_key] = filelist

    return (d_som1, m_som1)
def add_files(filelist, **kwargs):
    """
    This function takes a list of U{NeXus<www.nexusformat.org>} files and
    various keyword arguments and returns a data C{SOM} and a background
    C{SOM} (if requested) that is the sum of all the data from the specified
    files. B{It is assumed that the files contain similar data as only crude
    cross-checks will be made. You have been warned.}

    @param filelist: A list containing the names of the files to sum
    @type filelist: C{list}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword SO_Axis: This is the name of the main axis to read from the
                      NeXus file
    @type SO_Axis: C{string}

    @keyword Data_Paths: This contains the data paths and signals for the
                         requested detector banks
    @type Data_Paths: C{tuple} of C{tuple}s

    @keyword Signal_ROI: This is the name of a file that contains a list of
                         pixel IDs that will be read from the data file and
                         stored as a signal C{SOM}
    @type Signal_ROI: C{string}

    @keyword Signal_MASK: This is the name of a file that contains a list of
                          pixel IDs that will be read from the data file and
                          stored as a signal C{SOM}
    @type Signal_MASK: C{string}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword dst_type: The type of C{DST} to be created during file read-in.
                       The default value is I{application/x-NeXus}.
    @type dst_type: C{string}

    @keyword Verbose: This is a flag to turn on print statments. The default
                      is I{False}.
    @type Verbose: C{boolean}

    @keyword Timer: This is an SNS Timer object used for showing the
                    performance timing in the function.
    @type Timer: C{sns_timing.Timer}

    @return: Signal C{SOM.SOM} and background C{SOM.SOM}
    @rtype: C{tuple}

    @raise SystemExit: If any file cannot be read
    @raise RuntimeError: If both a ROI and MASK file are specified
    """
    import sys

    import common_lib
    import DST
    import hlr_utils

    # Parse keywords
    try:
        so_axis = kwargs["SO_Axis"]
    except KeyError:
        so_axis = "time_of_flight"

    try:
        data_paths = kwargs["Data_Paths"]
    except KeyError:
        data_paths = None

    try:
        signal_roi = kwargs["Signal_ROI"]
    except KeyError:
        signal_roi = None

    try:
        signal_mask = kwargs["Signal_MASK"]
    except KeyError:
        signal_mask = None

    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        dst_type = kwargs["dst_type"]
    except KeyError:
        # No explicit DST type given: sniff the first file.
        try:
            dst_type = hlr_utils.file_peeker(filelist[0])
        except RuntimeError:
            # Assume it is a NeXus file, since it is not a DR produced file
            dst_type = "application/x-NeXus"

    try:
        verbose = kwargs["Verbose"]
    except KeyError:
        verbose = False

    try:
        timer = kwargs["Timer"]
    except KeyError:
        timer = None

    if signal_roi is not None and signal_mask is not None:
        raise RuntimeError("Cannot specify both ROI and MASK file! Please " +
                           "choose!")

    counter = 0

    # NOTE(review): an empty filelist leaves d_som1 unbound and the final
    # return raises NameError — callers must pass at least one file.
    for filename in filelist:
        if verbose:
            print "File:", filename

        try:
            # NeXus DSTs take a filename; other DST types take an open
            # file object (released later via data_dst.release_resource()).
            if dst_type == "application/x-NeXus":
                data_dst = DST.getInstance(dst_type, filename)
            else:
                resource = open(filename, "r")
                data_dst = DST.getInstance(dst_type, resource)
        except SystemError:
            print "ERROR: Failed to data read file %s" % filename
            sys.exit(-1)

        if verbose:
            print "Reading data file %d" % counter

        if counter == 0:
            # First file: establish the signal SOM that subsequent files
            # will be summed into.
            if dst_type == "application/x-NeXus":
                d_som1 = data_dst.getSOM(data_paths, so_axis,
                                         roi_file=signal_roi,
                                         mask_file=signal_mask)
                d_som1.rekeyNxPars(dataset_type)
            else:
                if dst_type != "text/Dave2d":
                    d_som1 = data_dst.getSOM(data_paths,
                                             roi_file=signal_roi,
                                             mask_file=signal_mask)
                else:
                    d_som1 = data_dst.getSOM(data_paths)

            if verbose:
                len_data = len(d_som1)
                print "# Signal SO:", len_data
                # NOTE(review): this all-filtered exit only triggers when
                # Verbose is set — confirm whether it should run
                # unconditionally.
                if len_data == 0:
                    print "All data has been filtered. Program exiting."
                    sys.exit(0)

                if dst_type == "application/x-NeXus":
                    print "# TOF:", len(d_som1[0])
                    print "# TOF Axis:", len(d_som1[0].axis[0].val)
                elif dst_type != "text/num-info":
                    print "# Data Size:", len(d_som1[0])
                    print "# X-Axis:", len(d_som1[0].axis[0].val)
                    try:
                        axis_len = len(d_som1[0].axis[1].val)
                        print "# Y-Axis:", axis_len
                    except IndexError:
                        # 1D data: no second axis to report.
                        pass

            if timer is not None:
                timer.getTime(msg="After reading data")
        else:
            # Subsequent files: read into a temporary and accumulate.
            if dst_type == "application/x-NeXus":
                d_som_t = data_dst.getSOM(data_paths, so_axis,
                                          roi_file=signal_roi,
                                          mask_file=signal_mask)
                d_som_t.rekeyNxPars(dataset_type)
                add_nxpars_sig = True
            else:
                if dst_type != "text/Dave2d":
                    d_som_t = data_dst.getSOM(data_paths,
                                              roi_file=signal_roi,
                                              mask_file=signal_mask)
                else:
                    d_som_t = data_dst.getSOM(data_paths)
                add_nxpars_sig = False

            if timer is not None:
                timer.getTime(msg="After reading data")

            d_som1 = common_lib.add_ncerr(d_som_t, d_som1,
                                          add_nxpars=add_nxpars_sig)

            if timer is not None:
                timer.getTime(msg="After adding spectra")

            del d_som_t

            if timer is not None:
                timer.getTime(msg="After SOM deletion")

        data_dst.release_resource()
        del data_dst
        counter += 1

        if timer is not None:
            timer.getTime(msg="After resource release and DST deletion")

    # Record the contributing filenames on the SOM (NeXus input only).
    if dst_type == "application/x-NeXus":
        som_key_parts = [dataset_type, "filename"]
        som_key = "-".join(som_key_parts)

        d_som1.attr_list[som_key] = filelist
    else:
        # Previously written files already have this structure imposed
        pass

    return d_som1
def process_dgs_data(obj, conf, bcan, ecan, tcoeff, **kwargs): """ This function combines Steps 7 through 16 in Section 2.1.1 of the data reduction process for Direct Geometry Spectrometers as specified by the document at U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The function takes a calibrated dataset, a L{hlr_utils.Configure} object and processes the data accordingly. @param obj: A calibrated dataset object. @type obj: C{SOM.SOM} @param conf: Object that contains the current setup of the driver. @type conf: L{hlr_utils.Configure} @param bcan: The object containing the black can data. @type bcan: C{SOM.SOM} @param ecan: The object containing the empty can data. @type ecan: C{SOM.SOM} @param tcoeff: The transmission coefficient appropriate to the given data set. @type tcoeff: C{tuple} @param kwargs: A list of keyword arguments that the function accepts: @keyword dataset_type: The practical name of the dataset being processed. The default value is I{data}. @type dataset_type: C{string} @keyword cwp_used: A flag signalling the use of the chopper phase corrections. @type cwp_used: C{bool} @keyword timer: Timing object so the function can perform timing estimates. 
@type timer: C{sns_timer.DiffTime} @return: Object that has undergone all requested processing steps @rtype: C{SOM.SOM} """ import array_manip import common_lib import dr_lib import hlr_utils # Check keywords try: dataset_type = kwargs["dataset_type"] except KeyError: dataset_type = "data" try: t = kwargs["timer"] except KeyError: t = None cwp_used = kwargs.get("cwp_used", False) if conf.verbose: print "Processing %s information" % dataset_type # Step 7: Create black can background contribution if bcan is not None: if conf.verbose: print "Creating black can background contribution for %s" \ % dataset_type if t is not None: t.getTime(False) bccoeff = array_manip.sub_ncerr(1.0, 0.0, tcoeff[0], tcoeff[1]) bcan1 = common_lib.mult_ncerr(bcan, bccoeff) if t is not None: t.getTime(msg="After creating black can background contribution ") del bcan else: bcan1 = None # Step 8: Create empty can background contribution if ecan is not None: if conf.verbose: print "Creating empty can background contribution for %s" \ % dataset_type if t is not None: t.getTime(False) ecan1 = common_lib.mult_ncerr(ecan, tcoeff) if t is not None: t.getTime(msg="After creating empty can background contribution ") del ecan else: ecan1 = None # Step 9: Create background spectra if bcan1 is not None or ecan1 is not None and conf.verbose: print "Creating background spectra for %s" % dataset_type if bcan1 is not None and ecan1 is not None: if cwp_used: if conf.verbose: print "Rebinning empty can to black can axis." 
ecan2 = common_lib.rebin_axis_1D_frac(ecan1, bcan1[0].axis[0].val) else: ecan2 = ecan1 del ecan1 if t is not None: t.getTime(False) b_som = common_lib.add_ncerr(bcan1, ecan2) if t is not None: t.getTime(msg="After creating background spectra ") elif bcan1 is not None and ecan1 is None: b_som = bcan1 elif bcan1 is None and ecan1 is not None: b_som = ecan1 else: b_som = None del bcan1, ecan1 if cwp_used: if conf.verbose: print "Rebinning background spectra to %s" % dataset_type b_som1 = common_lib.rebin_axis_1D_frac(b_som, obj[0].axis[0].val) else: b_som1 = b_som del b_som if conf.dump_ctof_comb and b_som1 is not None: b_som_1 = dr_lib.sum_all_spectra(b_som1) hlr_utils.write_file(conf.output, "text/Spec", b_som_1, output_ext="ctof", extra_tag="background", data_ext=conf.ext_replacement, path_replacement=conf.path_replacement, verbose=conf.verbose, message="combined background TOF information") del b_som_1 # Step 10: Subtract background from data obj1 = dr_lib.subtract_bkg_from_data(obj, b_som1, verbose=conf.verbose, timer=t, dataset1=dataset_type, dataset2="background") del obj, b_som1 # Step 11: Calculate initial velocity if conf.verbose: print "Calculating initial velocity" if t is not None: t.getTime(False) if conf.initial_energy is not None: initial_wavelength = common_lib.energy_to_wavelength(\ conf.initial_energy.toValErrTuple()) initial_velocity = common_lib.wavelength_to_velocity(\ initial_wavelength) else: # This should actually calculate it, but don't have a way right now pass if t is not None: t.getTime(msg="After calculating initial velocity ") # Step 12: Calculate the time-zero offset if conf.time_zero_offset is not None: time_zero_offset = conf.time_zero_offset.toValErrTuple() else: # This should actually calculate it, but don't have a way right now time_zero_offset = (0.0, 0.0) # Step 13: Convert time-of-flight to final velocity if conf.verbose: print "Converting TOF to final velocity DGS" if t is not None: t.getTime(False) obj2 = 
common_lib.tof_to_final_velocity_dgs(obj1, initial_velocity, time_zero_offset, units="microsecond") if t is not None: t.getTime(msg="After calculating TOF to final velocity DGS ") del obj1 # Step 14: Convert final velocity to final wavelength if conf.verbose: print "Converting final velocity DGS to final wavelength" if t is not None: t.getTime(False) obj3 = common_lib.velocity_to_wavelength(obj2) if t is not None: t.getTime(msg="After calculating velocity to wavelength ") del obj2 if conf.dump_wave_comb: obj3_1 = dr_lib.sum_all_spectra( obj3, rebin_axis=conf.lambda_bins.toNessiList()) hlr_utils.write_file(conf.output, "text/Spec", obj3_1, output_ext="fwv", extra_tag=dataset_type, data_ext=conf.ext_replacement, path_replacement=conf.path_replacement, verbose=conf.verbose, message="combined final wavelength information") del obj3_1 # Step 15: Create the detector efficiency if conf.det_eff is not None: if conf.verbose: print "Creating detector efficiency spectra" if t is not None: t.getTime(False) det_eff = dr_lib.create_det_eff(obj3) if t is not None: t.getTime(msg="After creating detector efficiency spectra ") else: det_eff = None # Step 16: Divide the detector pixel spectra by the detector efficiency if det_eff is not None: if conf.verbose: print "Correcting %s for detector efficiency" % dataset_type if t is not None: t.getTime(False) obj4 = common_lib.div_ncerr(obj3, det_eff) if t is not None: t.getTime(msg="After correcting %s for detector efficiency" \ % dataset_type) else: obj4 = obj3 del obj3, det_eff return obj4
def process_igs_data(datalist, conf, **kwargs):
    """
    This function combines Steps 1 through 8 of the data reduction process
    for Inverse Geometry Spectrometers as specified by the documents at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object and
    processes the data accordingly. This function should really only be used
    in the context of I{amorphous_reduction} and I{calc_norm_eff}.

    @param datalist: A list containing the filenames of the data to be
                     processed.
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword tib_const: Object providing the time-independent background
                        constant to subtract.
    @type tib_const: L{hlr_utils.DrParameter}

    @keyword bkg_som: Object that will be used for early background
                      subtraction
    @type bkg_som: C{SOM.SOM}

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    # NOTE(review): process_igs_data is defined twice in this module with
    # equivalent bodies; the later definition rebinds the name at import
    # time and shadows this one.  Confirm which copy is intended and
    # remove the duplicate.
    import hlr_utils

    # Check keywords
    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    # tib_const is reduced to a (value, error) tuple immediately so the
    # subtraction below can consume it directly.
    try:
        if kwargs["tib_const"] is not None:
            tib_const = kwargs["tib_const"].toValErrTuple()
        else:
            tib_const = None
    except KeyError:
        tib_const = None

    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    try:
        bkg_som = kwargs["bkg_som"]
    except KeyError:
        bkg_som = None

    # Step 1: Open appropriate data files
    if not conf.mc:
        so_axis = "time_of_flight"
    else:
        so_axis = "Time_of_Flight"

    # Add so_axis to Configure object
    conf.so_axis = so_axis

    if conf.verbose:
        print "Reading %s file" % dataset_type

    # Special case handling for normalization data. Dynamically trying to
    # determine if incoming file is a previously calculated one.
    if dataset_type == "normalization":
        try:
            # Check the first incoming file
            dst_type = hlr_utils.file_peeker(datalist[0])
            # If file_peeker succeeds, the DST is different than the function
            # returns
            dst_type = "text/num-info"
            # Let ROI file handle filtering
            data_paths = None
        except RuntimeError:
            # It's a NeXus file
            dst_type = "application/x-NeXus"
            data_paths = conf.data_paths.toPath()
    else:
        dst_type = "application/x-NeXus"
        data_paths = conf.data_paths.toPath()

    # The [0] is to get the data SOM and ignore the None background SOM
    dp_som0 = dr_lib.add_files(datalist, Data_Paths=data_paths,
                               SO_Axis=so_axis, Signal_ROI=conf.roi_file,
                               dataset_type=dataset_type,
                               dst_type=dst_type,
                               Verbose=conf.verbose, Timer=t)

    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    if dst_type == "text/num-info":
        # Since we have a pre-calculated normalization dataset, set the flag
        # and return the SOM now
        conf.pre_norm = True
        # Make the labels and units compatible with a NeXus file based SOM
        dp_som0.setAxisLabel(0, "wavelength")
        dp_som0.setAxisUnits(0, "Angstroms")
        dp_som0.setYUnits("Counts/A")
        return dp_som0
    else:
        if dataset_type == "normalization":
            # Since we have a NeXus file, we need to continue
            conf.pre_norm = False

    # Cut the spectra if necessary
    dp_somA = dr_lib.cut_spectra(dp_som0, conf.tof_cut_min, conf.tof_cut_max)

    del dp_som0

    dp_som1 = dr_lib.fix_bin_contents(dp_somA)

    del dp_somA

    # NOTE(review): assumes the inst_geom_dst keyword was supplied whenever
    # conf.inst_geom is set; i_geom_dst is None otherwise and this call
    # would raise AttributeError -- confirm against callers.
    if conf.inst_geom is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), dp_som1)

    if conf.no_mon_norm:
        dm_som1 = None
    else:
        if conf.verbose:
            print "Reading in monitor data from %s file" % dataset_type

        # The [0] is to get the data SOM and ignore the None background SOM
        dm_som0 = dr_lib.add_files(datalist,
                                   Data_Paths=conf.mon_path.toPath(),
                                   SO_Axis=so_axis,
                                   dataset_type=dataset_type,
                                   Verbose=conf.verbose,
                                   Timer=t)

        if t is not None:
            t.getTime(msg="After reading monitor data ")

        dm_som1 = dr_lib.fix_bin_contents(dm_som0)

        del dm_som0

        if conf.inst_geom is not None:
            i_geom_dst.setGeometry(conf.mon_path.toPath(), dm_som1)

    # Early background subtraction: scale the background by the proton
    # charge ratio before subtracting it from the data.
    if bkg_som is not None:
        bkg_pcharge = bkg_som.attr_list["background-proton_charge"].getValue()
        data_pcharge = dp_som1.attr_list[dataset_type
                                         + "-proton_charge"].getValue()

        ratio = data_pcharge / bkg_pcharge

        bkg_som1 = common_lib.mult_ncerr(bkg_som, (ratio, 0.0))

        del bkg_som

        dp_som2 = dr_lib.subtract_bkg_from_data(dp_som1, bkg_som1,
                                                verbose=conf.verbose,
                                                timer=t,
                                                dataset1=dataset_type,
                                                dataset2="background")
    else:
        dp_som2 = dp_som1

    del dp_som1

    # Step 2: Dead Time Correction
    # No dead time correction is being applied to the data yet

    # Step 3: Time-independent background determination
    if conf.verbose and conf.tib_tofs is not None:
        print "Determining time-independent background from data"

    if t is not None and conf.tib_tofs is not None:
        t.getTime(False)

    # Presumably returns None when conf.tib_tofs is None, since all
    # subsequent use is guarded by "B is not None" -- verify in dr_lib.
    B = dr_lib.determine_time_indep_bkg(dp_som2, conf.tib_tofs)

    if t is not None and B is not None:
        t.getTime(msg="After determining time-independent background ")

    if conf.dump_tib and B is not None:
        file_comment = "TOFs: %s" % conf.tib_tofs

        hlr_utils.write_file(conf.output, "text/num-info", B,
                             output_ext="tib",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="time-independent background "\
                             +"information",
                             tag="Average",
                             units="counts",
                             comments=[file_comment])

    # Step 4: Subtract time-independent background
    if conf.verbose and B is not None:
        print "Subtracting time-independent background from data"

    if t is not None:
        t.getTime(False)

    if B is not None:
        dp_som3 = common_lib.sub_ncerr(dp_som2, B)
    else:
        dp_som3 = dp_som2

    if B is not None and t is not None:
        t.getTime(msg="After subtracting time-independent background ")

    del dp_som2, B

    # Step 5: Subtract time-independent background constant
    if conf.verbose and tib_const is not None:
        print "Subtracting time-independent background constant from data"

    if t is not None and tib_const is not None:
        t.getTime(False)

    if tib_const is not None:
        dp_som4 = common_lib.sub_ncerr(dp_som3, tib_const)
    else:
        dp_som4 = dp_som3

    if t is not None and tib_const is not None:
        t.getTime(msg="After subtracting time-independent background "\
                  +"constant ")

    del dp_som3

    # Provide override capability for final wavelength, time-zero slope and
    # time-zero offset
    if conf.wavelength_final is not None:
        dp_som4.attr_list["Wavelength_final"] = \
                                     conf.wavelength_final.toValErrTuple()

    # Note: time_zero_slope MUST be a tuple
    if conf.time_zero_slope is not None:
        dp_som4.attr_list["Time_zero_slope"] = \
                                      conf.time_zero_slope.toValErrTuple()
        if dm_som1 is not None:
            dm_som1.attr_list["Time_zero_slope"] = \
                                      conf.time_zero_slope.toValErrTuple()

    # Note: time_zero_offset MUST be a tuple
    if conf.time_zero_offset is not None:
        dp_som4.attr_list["Time_zero_offset"] = \
                                     conf.time_zero_offset.toValErrTuple()
        if dm_som1 is not None:
            dm_som1.attr_list["Time_zero_offset"] = \
                                     conf.time_zero_offset.toValErrTuple()

    # Step 6: Convert TOF to wavelength for data and monitor
    if conf.verbose:
        print "Converting TOF to wavelength"

    if t is not None:
        t.getTime(False)

    # Convert monitor
    if dm_som1 is not None:
        dm_som2 = common_lib.tof_to_wavelength_lin_time_zero(
            dm_som1,
            units="microsecond")
    else:
        dm_som2 = None

    # Convert detector pixels
    dp_som5 = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(
        dp_som4,
        units="microsecond",
        run_filter=conf.filter)

    if t is not None:
        t.getTime(msg="After converting TOF to wavelength ")

    if conf.dump_wave:
        hlr_utils.write_file(conf.output, "text/Spec", dp_som5,
                             output_ext="pxl",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="pixel wavelength information")
    if conf.dump_mon_wave and dm_som2 is not None:
        hlr_utils.write_file(conf.output, "text/Spec", dm_som2,
                             output_ext="mxl",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="monitor wavelength information")

    del dp_som4, dm_som1

    # Step 7: Efficiency correct monitor
    if conf.verbose and dm_som2 is not None and not conf.no_mon_effc:
        print "Efficiency correct monitor data"

    if t is not None:
        t.getTime(False)

    if not conf.no_mon_effc:
        dm_som3 = dr_lib.feff_correct_mon(dm_som2)
    else:
        dm_som3 = dm_som2

    if t is not None and dm_som2 is not None and not conf.no_mon_effc:
        t.getTime(msg="After efficiency correcting monitor ")

    if conf.dump_mon_effc and not conf.no_mon_effc and dm_som3 is not None:
        hlr_utils.write_file(conf.output, "text/Spec", dm_som3,
                             output_ext="mel",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="monitor wavelength information "\
                             +"(efficiency)")

    del dm_som2

    # Step 8: Rebin monitor axis onto detector pixel axis
    if conf.verbose and dm_som3 is not None:
        print "Rebin monitor axis to detector pixel axis"

    if t is not None:
        t.getTime(False)

    dm_som4 = dr_lib.rebin_monitor(dm_som3, dp_som5)

    if t is not None and dm_som4 is not None:
        t.getTime(msg="After rebinning monitor ")

    del dm_som3

    if conf.dump_mon_rebin and dm_som4 is not None:
        hlr_utils.write_file(conf.output, "text/Spec", dm_som4,
                             output_ext="mrl",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="monitor wavelength information "\
                             +"(rebinned)")

    # The lambda-dependent background is only done on sample data (aka data)
    # for the BSS instrument at the SNS
    if conf.inst == "BSS" and conf.ldb_const is not None and \
           dataset_type == "data":
        # Step 9: Convert chopper center wavelength to TOF center
        if conf.verbose:
            print "Converting chopper center wavelength to TOF"

        if t is not None:
            t.getTime(False)

        tof_center = dr_lib.convert_single_to_list(\
            "initial_wavelength_igs_lin_time_zero_to_tof",
            conf.chopper_lambda_cent.toValErrTuple(), dp_som5)

        # Step 10: Calculate beginning and end of detector TOF spectrum
        if conf.verbose:
            print "Calculating beginning and ending TOF ranges"

        half_inv_chop_freq = 0.5 / conf.chopper_freq.toValErrTuple()[0]
        # Above is in seconds, need microseconds
        half_inv_chop_freq *= 1.0e6

        tof_begin = common_lib.sub_ncerr(tof_center, (half_inv_chop_freq,
                                                      0.0))
        tof_end = common_lib.add_ncerr(tof_center, (half_inv_chop_freq, 0.0))

        # Step 11: Convert TOF_begin and TOF_end to wavelength
        if conf.verbose:
            print "Converting TOF_begin and TOF_end to wavelength"

        # Check for time-zero slope information
        try:
            tz_slope = conf.time_zero_slope.toValErrTuple()
        except AttributeError:
            tz_slope = (0.0, 0.0)

        # Check for time-zero offset information
        try:
            tz_offset = conf.time_zero_offset.toValErrTuple()
        except AttributeError:
            tz_offset = (0.0, 0.0)

        l_begin = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(\
            tof_begin, time_zero_slope=tz_slope, time_zero_offset=tz_offset,
            iobj=dp_som5, run_filter=False)
        l_end = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(\
            tof_end, time_zero_slope=tz_slope, time_zero_offset=tz_offset,
            iobj=dp_som5, run_filter=False)

        # Step 12: tof-least-bkg to lambda-least-bkg
        if conf.verbose:
            print "Converting TOF least background to wavelength"

        lambda_least_bkg = dr_lib.convert_single_to_list(\
            "tof_to_initial_wavelength_igs_lin_time_zero",
            conf.tof_least_bkg.toValErrTuple(), dp_som5)

        if t is not None:
            t.getTime(msg="After converting boundary positions ")

        # Step 13: Create lambda-dependent background spectrum
        if conf.verbose:
            print "Creating lambda-dependent background spectra"

        if t is not None:
            t.getTime(False)

        ldb_som = dr_lib.shift_spectrum(dm_som4, lambda_least_bkg, l_begin,
                                        l_end, conf.ldb_const.getValue())

        if t is not None:
            t.getTime(msg="After creating lambda-dependent background "\
                      +"spectra ")

        # Step 14: Subtract lambda-dependent background from sample data
        if conf.verbose:
            print "Subtracting lambda-dependent background from data"

        if t is not None:
            t.getTime(False)

        dp_som6 = common_lib.sub_ncerr(dp_som5, ldb_som)

        if t is not None:
            t.getTime(msg="After subtracting lambda-dependent background "\
                      +"from data ")
    else:
        dp_som6 = dp_som5

    del dp_som5

    # Step 15: Normalize data by monitor
    if conf.verbose and dm_som4 is not None:
        print "Normalizing data by monitor"

    if t is not None:
        t.getTime(False)

    if dm_som4 is not None:
        dp_som7 = common_lib.div_ncerr(dp_som6, dm_som4)

        if t is not None:
            t.getTime(msg="After normalizing data by monitor ")
    else:
        dp_som7 = dp_som6

    if conf.dump_wave_mnorm:
        dp_som7_1 = dr_lib.sum_all_spectra(dp_som7,\
                                   rebin_axis=conf.lambda_bins.toNessiList())

        write_message = "combined pixel wavelength information"
        if dm_som4 is not None:
            write_message += " (monitor normalized)"

        hlr_utils.write_file(conf.output, "text/Spec", dp_som7_1,
                             output_ext="pml",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message=write_message)
        del dp_som7_1

    del dm_som4, dp_som6

    return dp_som7
def process_igs_data(datalist, conf, **kwargs):
    """
    This function combines Steps 1 through 8 of the data reduction process
    for Inverse Geometry Spectrometers as specified by the documents at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object and
    processes the data accordingly. This function should really only be used
    in the context of I{amorphous_reduction} and I{calc_norm_eff}.

    @param datalist: A list containing the filenames of the data to be
                     processed.
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}

    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword tib_const: Object providing the time-independent background
                        constant to subtract.
    @type tib_const: L{hlr_utils.DrParameter}

    @keyword bkg_som: Object that will be used for early background
                      subtraction
    @type bkg_som: C{SOM.SOM}

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    # NOTE(review): this is the second of two equivalent definitions of
    # process_igs_data in this module; being later, it is the one bound to
    # the name at import time.  Confirm and remove the duplicate.
    import hlr_utils

    # Check keywords
    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    # tib_const is reduced to a (value, error) tuple up front so the
    # subtraction below can consume it directly.
    try:
        if kwargs["tib_const"] is not None:
            tib_const = kwargs["tib_const"].toValErrTuple()
        else:
            tib_const = None
    except KeyError:
        tib_const = None

    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    try:
        bkg_som = kwargs["bkg_som"]
    except KeyError:
        bkg_som = None

    # Step 1: Open appropriate data files
    if not conf.mc:
        so_axis = "time_of_flight"
    else:
        so_axis = "Time_of_Flight"

    # Add so_axis to Configure object
    conf.so_axis = so_axis

    if conf.verbose:
        print "Reading %s file" % dataset_type

    # Special case handling for normalization data. Dynamically trying to
    # determine if incoming file is a previously calculated one.
    if dataset_type == "normalization":
        try:
            # Check the first incoming file
            dst_type = hlr_utils.file_peeker(datalist[0])
            # If file_peeker succeeds, the DST is different than the function
            # returns
            dst_type = "text/num-info"
            # Let ROI file handle filtering
            data_paths = None
        except RuntimeError:
            # It's a NeXus file
            dst_type = "application/x-NeXus"
            data_paths = conf.data_paths.toPath()
    else:
        dst_type = "application/x-NeXus"
        data_paths = conf.data_paths.toPath()

    # The [0] is to get the data SOM and ignore the None background SOM
    dp_som0 = dr_lib.add_files(
        datalist,
        Data_Paths=data_paths,
        SO_Axis=so_axis,
        Signal_ROI=conf.roi_file,
        dataset_type=dataset_type,
        dst_type=dst_type,
        Verbose=conf.verbose,
        Timer=t,
    )

    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    if dst_type == "text/num-info":
        # Since we have a pre-calculated normalization dataset, set the flag
        # and return the SOM now
        conf.pre_norm = True
        # Make the labels and units compatible with a NeXus file based SOM
        dp_som0.setAxisLabel(0, "wavelength")
        dp_som0.setAxisUnits(0, "Angstroms")
        dp_som0.setYUnits("Counts/A")
        return dp_som0
    else:
        if dataset_type == "normalization":
            # Since we have a NeXus file, we need to continue
            conf.pre_norm = False

    # Cut the spectra if necessary
    dp_somA = dr_lib.cut_spectra(dp_som0, conf.tof_cut_min, conf.tof_cut_max)

    del dp_som0

    dp_som1 = dr_lib.fix_bin_contents(dp_somA)

    del dp_somA

    # NOTE(review): assumes the inst_geom_dst keyword was supplied whenever
    # conf.inst_geom is set; i_geom_dst is None otherwise and this call
    # would raise AttributeError -- confirm against callers.
    if conf.inst_geom is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), dp_som1)

    if conf.no_mon_norm:
        dm_som1 = None
    else:
        if conf.verbose:
            print "Reading in monitor data from %s file" % dataset_type

        # The [0] is to get the data SOM and ignore the None background SOM
        dm_som0 = dr_lib.add_files(
            datalist,
            Data_Paths=conf.mon_path.toPath(),
            SO_Axis=so_axis,
            dataset_type=dataset_type,
            Verbose=conf.verbose,
            Timer=t,
        )

        if t is not None:
            t.getTime(msg="After reading monitor data ")

        dm_som1 = dr_lib.fix_bin_contents(dm_som0)

        del dm_som0

        if conf.inst_geom is not None:
            i_geom_dst.setGeometry(conf.mon_path.toPath(), dm_som1)

    # Early background subtraction: scale the background by the proton
    # charge ratio before subtracting it from the data.
    if bkg_som is not None:
        bkg_pcharge = bkg_som.attr_list["background-proton_charge"].getValue()
        data_pcharge = dp_som1.attr_list[dataset_type + "-proton_charge"].getValue()

        ratio = data_pcharge / bkg_pcharge

        bkg_som1 = common_lib.mult_ncerr(bkg_som, (ratio, 0.0))

        del bkg_som

        dp_som2 = dr_lib.subtract_bkg_from_data(
            dp_som1, bkg_som1, verbose=conf.verbose, timer=t, dataset1=dataset_type, dataset2="background"
        )
    else:
        dp_som2 = dp_som1

    del dp_som1

    # Step 2: Dead Time Correction
    # No dead time correction is being applied to the data yet

    # Step 3: Time-independent background determination
    if conf.verbose and conf.tib_tofs is not None:
        print "Determining time-independent background from data"

    if t is not None and conf.tib_tofs is not None:
        t.getTime(False)

    # Presumably returns None when conf.tib_tofs is None, since all
    # subsequent use is guarded by "B is not None" -- verify in dr_lib.
    B = dr_lib.determine_time_indep_bkg(dp_som2, conf.tib_tofs)

    if t is not None and B is not None:
        t.getTime(msg="After determining time-independent background ")

    if conf.dump_tib and B is not None:
        file_comment = "TOFs: %s" % conf.tib_tofs

        hlr_utils.write_file(
            conf.output,
            "text/num-info",
            B,
            output_ext="tib",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message="time-independent background " + "information",
            tag="Average",
            units="counts",
            comments=[file_comment],
        )

    # Step 4: Subtract time-independent background
    if conf.verbose and B is not None:
        print "Subtracting time-independent background from data"

    if t is not None:
        t.getTime(False)

    if B is not None:
        dp_som3 = common_lib.sub_ncerr(dp_som2, B)
    else:
        dp_som3 = dp_som2

    if B is not None and t is not None:
        t.getTime(msg="After subtracting time-independent background ")

    del dp_som2, B

    # Step 5: Subtract time-independent background constant
    if conf.verbose and tib_const is not None:
        print "Subtracting time-independent background constant from data"

    if t is not None and tib_const is not None:
        t.getTime(False)

    if tib_const is not None:
        dp_som4 = common_lib.sub_ncerr(dp_som3, tib_const)
    else:
        dp_som4 = dp_som3

    if t is not None and tib_const is not None:
        t.getTime(msg="After subtracting time-independent background " + "constant ")

    del dp_som3

    # Provide override capability for final wavelength, time-zero slope and
    # time-zero offset
    if conf.wavelength_final is not None:
        dp_som4.attr_list["Wavelength_final"] = conf.wavelength_final.toValErrTuple()

    # Note: time_zero_slope MUST be a tuple
    if conf.time_zero_slope is not None:
        dp_som4.attr_list["Time_zero_slope"] = conf.time_zero_slope.toValErrTuple()
        if dm_som1 is not None:
            dm_som1.attr_list["Time_zero_slope"] = conf.time_zero_slope.toValErrTuple()

    # Note: time_zero_offset MUST be a tuple
    if conf.time_zero_offset is not None:
        dp_som4.attr_list["Time_zero_offset"] = conf.time_zero_offset.toValErrTuple()
        if dm_som1 is not None:
            dm_som1.attr_list["Time_zero_offset"] = conf.time_zero_offset.toValErrTuple()

    # Step 6: Convert TOF to wavelength for data and monitor
    if conf.verbose:
        print "Converting TOF to wavelength"

    if t is not None:
        t.getTime(False)

    # Convert monitor
    if dm_som1 is not None:
        dm_som2 = common_lib.tof_to_wavelength_lin_time_zero(dm_som1, units="microsecond")
    else:
        dm_som2 = None

    # Convert detector pixels
    dp_som5 = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(
        dp_som4, units="microsecond", run_filter=conf.filter
    )

    if t is not None:
        t.getTime(msg="After converting TOF to wavelength ")

    if conf.dump_wave:
        hlr_utils.write_file(
            conf.output,
            "text/Spec",
            dp_som5,
            output_ext="pxl",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message="pixel wavelength information",
        )
    if conf.dump_mon_wave and dm_som2 is not None:
        hlr_utils.write_file(
            conf.output,
            "text/Spec",
            dm_som2,
            output_ext="mxl",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message="monitor wavelength information",
        )

    del dp_som4, dm_som1

    # Step 7: Efficiency correct monitor
    if conf.verbose and dm_som2 is not None and not conf.no_mon_effc:
        print "Efficiency correct monitor data"

    if t is not None:
        t.getTime(False)

    if not conf.no_mon_effc:
        dm_som3 = dr_lib.feff_correct_mon(dm_som2)
    else:
        dm_som3 = dm_som2

    if t is not None and dm_som2 is not None and not conf.no_mon_effc:
        t.getTime(msg="After efficiency correcting monitor ")

    if conf.dump_mon_effc and not conf.no_mon_effc and dm_som3 is not None:
        hlr_utils.write_file(
            conf.output,
            "text/Spec",
            dm_som3,
            output_ext="mel",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message="monitor wavelength information " + "(efficiency)",
        )

    del dm_som2

    # Step 8: Rebin monitor axis onto detector pixel axis
    if conf.verbose and dm_som3 is not None:
        print "Rebin monitor axis to detector pixel axis"

    if t is not None:
        t.getTime(False)

    dm_som4 = dr_lib.rebin_monitor(dm_som3, dp_som5)

    if t is not None and dm_som4 is not None:
        t.getTime(msg="After rebinning monitor ")

    del dm_som3

    if conf.dump_mon_rebin and dm_som4 is not None:
        hlr_utils.write_file(
            conf.output,
            "text/Spec",
            dm_som4,
            output_ext="mrl",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message="monitor wavelength information " + "(rebinned)",
        )

    # The lambda-dependent background is only done on sample data (aka data)
    # for the BSS instrument at the SNS
    if conf.inst == "BSS" and conf.ldb_const is not None and dataset_type == "data":
        # Step 9: Convert chopper center wavelength to TOF center
        if conf.verbose:
            print "Converting chopper center wavelength to TOF"

        if t is not None:
            t.getTime(False)

        tof_center = dr_lib.convert_single_to_list(
            "initial_wavelength_igs_lin_time_zero_to_tof", conf.chopper_lambda_cent.toValErrTuple(), dp_som5
        )

        # Step 10: Calculate beginning and end of detector TOF spectrum
        if conf.verbose:
            print "Calculating beginning and ending TOF ranges"

        half_inv_chop_freq = 0.5 / conf.chopper_freq.toValErrTuple()[0]
        # Above is in seconds, need microseconds
        half_inv_chop_freq *= 1.0e6

        tof_begin = common_lib.sub_ncerr(tof_center, (half_inv_chop_freq, 0.0))
        tof_end = common_lib.add_ncerr(tof_center, (half_inv_chop_freq, 0.0))

        # Step 11: Convert TOF_begin and TOF_end to wavelength
        if conf.verbose:
            print "Converting TOF_begin and TOF_end to wavelength"

        # Check for time-zero slope information
        try:
            tz_slope = conf.time_zero_slope.toValErrTuple()
        except AttributeError:
            tz_slope = (0.0, 0.0)

        # Check for time-zero offset information
        try:
            tz_offset = conf.time_zero_offset.toValErrTuple()
        except AttributeError:
            tz_offset = (0.0, 0.0)

        l_begin = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(
            tof_begin, time_zero_slope=tz_slope, time_zero_offset=tz_offset, iobj=dp_som5, run_filter=False
        )
        l_end = common_lib.tof_to_initial_wavelength_igs_lin_time_zero(
            tof_end, time_zero_slope=tz_slope, time_zero_offset=tz_offset, iobj=dp_som5, run_filter=False
        )

        # Step 12: tof-least-bkg to lambda-least-bkg
        if conf.verbose:
            print "Converting TOF least background to wavelength"

        lambda_least_bkg = dr_lib.convert_single_to_list(
            "tof_to_initial_wavelength_igs_lin_time_zero", conf.tof_least_bkg.toValErrTuple(), dp_som5
        )

        if t is not None:
            t.getTime(msg="After converting boundary positions ")

        # Step 13: Create lambda-dependent background spectrum
        if conf.verbose:
            print "Creating lambda-dependent background spectra"

        if t is not None:
            t.getTime(False)

        ldb_som = dr_lib.shift_spectrum(dm_som4, lambda_least_bkg, l_begin, l_end, conf.ldb_const.getValue())

        if t is not None:
            t.getTime(msg="After creating lambda-dependent background " + "spectra ")

        # Step 14: Subtract lambda-dependent background from sample data
        if conf.verbose:
            print "Subtracting lambda-dependent background from data"

        if t is not None:
            t.getTime(False)

        dp_som6 = common_lib.sub_ncerr(dp_som5, ldb_som)

        if t is not None:
            t.getTime(msg="After subtracting lambda-dependent background " + "from data ")
    else:
        dp_som6 = dp_som5

    del dp_som5

    # Step 15: Normalize data by monitor
    if conf.verbose and dm_som4 is not None:
        print "Normalizing data by monitor"

    if t is not None:
        t.getTime(False)

    if dm_som4 is not None:
        dp_som7 = common_lib.div_ncerr(dp_som6, dm_som4)

        if t is not None:
            t.getTime(msg="After normalizing data by monitor ")
    else:
        dp_som7 = dp_som6

    if conf.dump_wave_mnorm:
        dp_som7_1 = dr_lib.sum_all_spectra(dp_som7, rebin_axis=conf.lambda_bins.toNessiList())

        write_message = "combined pixel wavelength information"
        if dm_som4 is not None:
            write_message += " (monitor normalized)"

        hlr_utils.write_file(
            conf.output,
            "text/Spec",
            dp_som7_1,
            output_ext="pml",
            extra_tag=dataset_type,
            verbose=conf.verbose,
            data_ext=conf.ext_replacement,
            path_replacement=conf.path_replacement,
            message=write_message,
        )
        del dp_som7_1

    del dm_som4, dp_som6

    return dp_som7
def process_dgs_data(obj, conf, bcan, ecan, tcoeff, **kwargs): """ This function combines Steps 7 through 16 in Section 2.1.1 of the data reduction process for Direct Geometry Spectrometers as specified by the document at U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The function takes a calibrated dataset, a L{hlr_utils.Configure} object and processes the data accordingly. @param obj: A calibrated dataset object. @type obj: C{SOM.SOM} @param conf: Object that contains the current setup of the driver. @type conf: L{hlr_utils.Configure} @param bcan: The object containing the black can data. @type bcan: C{SOM.SOM} @param ecan: The object containing the empty can data. @type ecan: C{SOM.SOM} @param tcoeff: The transmission coefficient appropriate to the given data set. @type tcoeff: C{tuple} @param kwargs: A list of keyword arguments that the function accepts: @keyword dataset_type: The practical name of the dataset being processed. The default value is I{data}. @type dataset_type: C{string} @keyword cwp_used: A flag signalling the use of the chopper phase corrections. @type cwp_used: C{bool} @keyword timer: Timing object so the function can perform timing estimates. 
@type timer: C{sns_timer.DiffTime} @return: Object that has undergone all requested processing steps @rtype: C{SOM.SOM} """ import array_manip import common_lib import dr_lib import hlr_utils # Check keywords try: dataset_type = kwargs["dataset_type"] except KeyError: dataset_type = "data" try: t = kwargs["timer"] except KeyError: t = None cwp_used = kwargs.get("cwp_used", False) if conf.verbose: print "Processing %s information" % dataset_type # Step 7: Create black can background contribution if bcan is not None: if conf.verbose: print "Creating black can background contribution for %s" \ % dataset_type if t is not None: t.getTime(False) bccoeff = array_manip.sub_ncerr(1.0, 0.0, tcoeff[0], tcoeff[1]) bcan1 = common_lib.mult_ncerr(bcan, bccoeff) if t is not None: t.getTime(msg="After creating black can background contribution ") del bcan else: bcan1 = None # Step 8: Create empty can background contribution if ecan is not None: if conf.verbose: print "Creating empty can background contribution for %s" \ % dataset_type if t is not None: t.getTime(False) ecan1 = common_lib.mult_ncerr(ecan, tcoeff) if t is not None: t.getTime(msg="After creating empty can background contribution ") del ecan else: ecan1 = None # Step 9: Create background spectra if bcan1 is not None or ecan1 is not None and conf.verbose: print "Creating background spectra for %s" % dataset_type if bcan1 is not None and ecan1 is not None: if cwp_used: if conf.verbose: print "Rebinning empty can to black can axis." 
ecan2 = common_lib.rebin_axis_1D_frac(ecan1, bcan1[0].axis[0].val) else: ecan2 = ecan1 del ecan1 if t is not None: t.getTime(False) b_som = common_lib.add_ncerr(bcan1, ecan2) if t is not None: t.getTime(msg="After creating background spectra ") elif bcan1 is not None and ecan1 is None: b_som = bcan1 elif bcan1 is None and ecan1 is not None: b_som = ecan1 else: b_som = None del bcan1, ecan1 if cwp_used: if conf.verbose: print "Rebinning background spectra to %s" % dataset_type b_som1 = common_lib.rebin_axis_1D_frac(b_som, obj[0].axis[0].val) else: b_som1 = b_som del b_som if conf.dump_ctof_comb and b_som1 is not None: b_som_1 = dr_lib.sum_all_spectra(b_som1) hlr_utils.write_file(conf.output, "text/Spec", b_som_1, output_ext="ctof", extra_tag="background", data_ext=conf.ext_replacement, path_replacement=conf.path_replacement, verbose=conf.verbose, message="combined background TOF information") del b_som_1 # Step 10: Subtract background from data obj1 = dr_lib.subtract_bkg_from_data(obj, b_som1, verbose=conf.verbose, timer=t, dataset1=dataset_type, dataset2="background") del obj, b_som1 # Step 11: Calculate initial velocity if conf.verbose: print "Calculating initial velocity" if t is not None: t.getTime(False) if conf.initial_energy is not None: initial_wavelength = common_lib.energy_to_wavelength(\ conf.initial_energy.toValErrTuple()) initial_velocity = common_lib.wavelength_to_velocity(\ initial_wavelength) else: # This should actually calculate it, but don't have a way right now pass if t is not None: t.getTime(msg="After calculating initial velocity ") # Step 12: Calculate the time-zero offset if conf.time_zero_offset is not None: time_zero_offset = conf.time_zero_offset.toValErrTuple() else: # This should actually calculate it, but don't have a way right now time_zero_offset = (0.0, 0.0) # Step 13: Convert time-of-flight to final velocity if conf.verbose: print "Converting TOF to final velocity DGS" if t is not None: t.getTime(False) obj2 = 
common_lib.tof_to_final_velocity_dgs(obj1, initial_velocity, time_zero_offset, units="microsecond") if t is not None: t.getTime(msg="After calculating TOF to final velocity DGS ") del obj1 # Step 14: Convert final velocity to final wavelength if conf.verbose: print "Converting final velocity DGS to final wavelength" if t is not None: t.getTime(False) obj3 = common_lib.velocity_to_wavelength(obj2) if t is not None: t.getTime(msg="After calculating velocity to wavelength ") del obj2 if conf.dump_wave_comb: obj3_1 = dr_lib.sum_all_spectra(obj3, rebin_axis=conf.lambda_bins.toNessiList()) hlr_utils.write_file(conf.output, "text/Spec", obj3_1, output_ext="fwv", extra_tag=dataset_type, data_ext=conf.ext_replacement, path_replacement=conf.path_replacement, verbose=conf.verbose, message="combined final wavelength information") del obj3_1 # Step 15: Create the detector efficiency if conf.det_eff is not None: if conf.verbose: print "Creating detector efficiency spectra" if t is not None: t.getTime(False) det_eff = dr_lib.create_det_eff(obj3) if t is not None: t.getTime(msg="After creating detector efficiency spectra ") else: det_eff = None # Step 16: Divide the detector pixel spectra by the detector efficiency if det_eff is not None: if conf.verbose: print "Correcting %s for detector efficiency" % dataset_type if t is not None: t.getTime(False) obj4 = common_lib.div_ncerr(obj3, det_eff) if t is not None: t.getTime(msg="After correcting %s for detector efficiency" \ % dataset_type) else: obj4 = obj3 del obj3, det_eff return obj4
def add_files_dm(filelist, **kwargs): """ This function takes a list of U{NeXus<www.nexusformat.org>} files and various keyword arguments and returns a data C{SOM} and a monitor C{SOM} that is the sum of all the data from the specified files. B{It is assumed that the files contain similar data as only crude cross-checks will be made. You have been warned.} @param filelist: A list containing the names of the files to sum @type filelist: C{list} @param kwargs: A list of keyword arguments that the function accepts: @keyword SO_Axis: This is the name of the main axis to read from the NeXus file @type SO_Axis: C{string} @keyword Data_Paths: This contains the data paths and signals for the requested detector banks @type Data_Paths: C{tuple} of C{tuple}s @keyword Mon_Paths: This contains the data paths and signals for the requested monitor banks @type Mon_Paths: C{tuple} of C{tuple}s @keyword Signal_ROI: This is the name of a file that contains a list of pixel IDs that will be read from the data file and stored as a signal C{SOM} @type Signal_ROI: C{string} @keyword Signal_MASK: This is the name of a file that contains a list of pixel IDs that will be read from the data file and stored as a signal C{SOM} @type Signal_MASK: C{string} @keyword dataset_type: The practical name of the dataset being processed. The default value is I{data}. @type dataset_type: C{string} @keyword dataset_cwp: A set of chopper phase corrections for the dataset. This will instruct the function to shift the TOF axes of mulitple datasets and perform rebinning. The TOF axis for the first dataset is the one that all other datasets will be rebinned to. @type dataset_cwp: C{list} of C{float}s @keyword Verbose: This is a flag to turn on print statments. The default is I{False}. @type Verbose: C{boolean} @keyword Timer: This is an SNS Timer object used for showing the performance timing in the function. 
@type Timer: C{sns_timing.Timer} @return: Signal C{SOM.SOM} and monitor C{SOM.SOM} @rtype: C{tuple} @raise SystemExit: If any file cannot be read @raise RuntimeError: If both a ROI and MASK file are specified """ import sys import common_lib import DST # Parse keywords try: so_axis = kwargs["SO_Axis"] except KeyError: so_axis = "time_of_flight" try: data_paths = kwargs["Data_Paths"] except KeyError: data_paths = None try: mon_paths = kwargs["Mon_Paths"] except KeyError: mon_paths = None try: signal_roi = kwargs["Signal_ROI"] except KeyError: signal_roi = None try: signal_mask = kwargs["Signal_MASK"] except KeyError: signal_mask = None try: dataset_type = kwargs["dataset_type"] except KeyError: dataset_type = "data" try: verbose = kwargs["Verbose"] except KeyError: verbose = False try: timer = kwargs["Timer"] except KeyError: timer = None dataset_cwp = kwargs.get("dataset_cwp") if signal_roi is not None and signal_mask is not None: raise RuntimeError("Cannot specify both ROI and MASK file! Please "\ +"choose!") dst_type = "application/x-NeXus" counter = 0 for filename in filelist: if verbose: print "File:", filename if dataset_cwp is not None: print "TOF Offset:", dataset_cwp[counter] if dataset_cwp is not None: cwp = dataset_cwp[counter] else: cwp = None try: data_dst = DST.getInstance(dst_type, filename) except SystemError: print "ERROR: Failed to data read file %s" % filename sys.exit(-1) if timer is not None: timer.getTime(msg="After parsing file") if verbose: print "Reading data file %d" % counter if counter == 0: d_som1 = data_dst.getSOM(data_paths, so_axis, roi_file=signal_roi, mask_file=signal_mask, tof_offset=cwp) d_som1.rekeyNxPars(dataset_type) if verbose: print "# Signal SO:", len(d_som1) try: print "# TOF:", len(d_som1[0]) print "# TOF Axis:", len(d_som1[0].axis[0].val) except IndexError: # No data is present so say so again print "information is unavailable since no data "\ +"present. Exiting." 
sys.exit(0) if timer is not None: timer.getTime(msg="After reading data") if mon_paths is not None: if verbose: print "Reading monitor %d" % counter if counter == 0: m_som1 = data_dst.getSOM(mon_paths, so_axis, tof_offset=cwp) m_som1.rekeyNxPars(dataset_type) if verbose: print "# Monitor SO:", len(m_som1) print "# TOF:", len(m_som1[0]) print "# TOF Axis:", len(m_som1[0].axis[0].val) if timer is not None: timer.getTime(msg="After reading monitor data") else: m_som1 = None else: d_som_t0 = data_dst.getSOM(data_paths, so_axis, roi_file=signal_roi, mask_file=signal_mask, tof_offset=cwp) d_som_t0.rekeyNxPars(dataset_type) if timer is not None: timer.getTime(msg="After reading data") if dataset_cwp is not None: d_som_t = common_lib.rebin_axis_1D_frac(d_som_t0, d_som1[0].axis[0].val) del d_som_t0 else: d_som_t = d_som_t0 d_som1 = common_lib.add_ncerr(d_som_t, d_som1, add_nxpars=True) if timer is not None: timer.getTime(msg="After adding data spectra") del d_som_t if timer is not None: timer.getTime(msg="After data SOM deletion") if mon_paths is not None: m_som_t0 = data_dst.getSOM(mon_paths, so_axis, tof_offset=cwp) m_som_t0.rekeyNxPars(dataset_type) if timer is not None: timer.getTime(msg="After reading monitor data") if dataset_cwp is not None: m_som_t = common_lib.rebin_axis_1D_frac(m_som_t0, m_som1[0].axis[0].val) del m_som_t0 else: m_som_t = m_som_t0 m_som1 = common_lib.add_ncerr(m_som_t, m_som1, add_nxpars=True) if timer is not None: timer.getTime(msg="After adding monitor spectra") del m_som_t if timer is not None: timer.getTime(msg="After monitor SOM deletion") data_dst.release_resource() del data_dst counter += 1 if timer is not None: timer.getTime(msg="After resource release and DST deletion") som_key_parts = [dataset_type, "filename"] som_key = "-".join(som_key_parts) d_som1.attr_list[som_key] = filelist if m_som1 is not None: m_som1.attr_list[som_key] = filelist return (d_som1, m_som1)