def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import common_lib
    import dr_lib
    import DST
    import hlr_utils

    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver "\
                           +"script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"

        inst_geom_dst = DST.getInstance("application/x-NxsGeom",
                                        config.inst_geom)
    else:
        inst_geom_dst = None

    config.so_axis = "time_of_flight"

    # Steps 1-3: Produce a scaled summed dark current dataset
    dc_som = dr_lib.scaled_summed_data(config.dkcur, config,
                                       dataset_type="dark_current",
                                       timer=tim)

    # Perform Steps 3-6 on black can data
    if config.bcan is not None:
        b_som1 = dr_lib.calibrate_dgs_data(config.bcan, config, dc_som,
                                           dataset_type="black_can",
                                           inst_geom_dst=inst_geom_dst,
                                           tib_const=config.tib_const,
                                           cwp=config.cwp_bcan,
                                           timer=tim)
    else:
        b_som1 = None

    # Perform Steps 3-6 on empty can data
    if config.ecan is not None:
        e_som1 = dr_lib.calibrate_dgs_data(config.ecan, config, dc_som,
                                           dataset_type="empty_can",
                                           inst_geom_dst=inst_geom_dst,
                                           tib_const=config.tib_const,
                                           cwp=config.cwp_ecan,
                                           timer=tim)
    else:
        e_som1 = None

    # Perform Steps 3-6 on normalization data
    n_som1 = dr_lib.calibrate_dgs_data(config.data, config, dc_som,
                                       dataset_type="normalization",
                                       inst_geom_dst=inst_geom_dst,
                                       tib_const=config.tib_const,
                                       cwp=config.cwp_data,
                                       timer=tim)

    # Perform Steps 7-16 on normalization data
    if config.norm_trans_coeff is None:
        norm_trans_coeff = None
    else:
        norm_trans_coeff = config.norm_trans_coeff.toValErrTuple()

    # Determine if we need to rebin the empty or black can data
    if config.ecan is not None and e_som1 is not None:
        ecan_cwp = True
    else:
        ecan_cwp = False

    if config.bcan is not None and b_som1 is not None:
        bcan_cwp = True
    else:
        bcan_cwp = False

    cwp_used = ecan_cwp or bcan_cwp

    n_som2 = dr_lib.process_dgs_data(n_som1, config, b_som1, e_som1,
                                     norm_trans_coeff,
                                     dataset_type="normalization",
                                     cwp_used=cwp_used, timer=tim)

    del n_som1, b_som1, e_som1

    # Step 17: Integrate normalization spectra
    if config.verbose:
        print "Integrating normalization spectra"

    if tim is not None:
        tim.getTime(False)

    if config.norm_int_range is None:
        start_val = float("inf")
        end_val = float("inf")
    else:
        if not config.wb_norm:
            # Translate energy transfer to final energy
            ef_start = config.initial_energy.getValue() - \
                       config.norm_int_range[0]
            ef_end = config.initial_energy.getValue() - \
                     config.norm_int_range[1]
            # Convert final energy to final wavelength
            start_val = common_lib.energy_to_wavelength((ef_start, 0.0))[0]
            end_val = common_lib.energy_to_wavelength((ef_end, 0.0))[0]
        else:
            start_val = config.norm_int_range[0]
            end_val = config.norm_int_range[1]

    n_som3 = dr_lib.integrate_spectra(n_som2, start=start_val,
                                      end=end_val, width=True)

    del n_som2

    if tim is not None:
        tim.getTime(msg="After integrating normalization spectra ")

    file_comment = "Normalization Integration range: %0.3fA, %0.3fA" \
                   % (start_val, end_val)

    hlr_utils.write_file(config.output, "text/num-info", n_som3,
                         output_ext="norm",
                         data_ext=config.ext_replacement,
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="normalization values",
                         comments=[file_comment],
                         tag="Integral", units="counts")

    if tim is not None:
        tim.getTime(False)

    if config.verbose:
        print "Making mask file"

    # Make mask file from threshold
    dr_lib.filter_normalization(n_som3, config.lo_threshold,
                                config.hi_threshold, config)

    if tim is not None:
        tim.getTime(msg="After making mask file ")

    # Write out RMD file
    n_som3.attr_list["config"] = config

    hlr_utils.write_file(config.output, "text/rmd", n_som3,
                         output_ext="rmd",
                         data_ext=config.ext_replacement,
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="metadata")

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")
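# --- Hedged usage sketch (not part of the original source) -----------------
# The real drivers build ``config`` via the hlr_utils option-parsing
# machinery and pass an sns_timer.DiffTime instance as ``tim``.  The
# hypothetical stub below only illustrates the timing protocol that run()
# relies on: getTime(False) starts an interval, getTime(msg=...) reports
# it, and getOldTime()/setOldTime() bracket the total running time.
import time

class _StubTimer(object):
    """Hypothetical stand-in for sns_timer.DiffTime."""

    def __init__(self):
        self._old = time.time()
        self._mark = self._old

    def getOldTime(self):
        return self._old

    def setOldTime(self, value):
        self._mark = value

    def getTime(self, mark=True, msg=None):
        now = time.time()
        if msg is not None:
            print "%s: %.3f seconds" % (msg, now - self._mark)
        self._mark = now

# Usage: run(config, tim=_StubTimer()) with a fully populated config.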
def create_E_vs_Q_dgs(som, E_i, Q_final, **kwargs):
    """
    This function starts with the rebinned energy transfer and turns this
    into a 2D spectra with E and Q axes for DGS instruments.

    @param som: The input object with initial IGS wavelength axis
    @type som: C{SOM.SOM}

    @param E_i: The initial energy for the given data.
    @type E_i: C{tuple}

    @param Q_final: The momentum transfer axis to rebin the data to
    @type Q_final: C{nessi_list.NessiList}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword corner_angles: The object that contains the corner geometry
                            information.
    @type corner_angles: C{dict}

    @keyword so_id: The identifier represents a number, string, tuple or
                    other object that describes the resulting C{SO}
    @type so_id: C{int}, C{string}, C{tuple}, C{pixel ID}

    @keyword y_label: The y axis label
    @type y_label: C{string}

    @keyword y_units: The y axis units
    @type y_units: C{string}

    @keyword x_labels: This is a list of names that sets the individual
                       x axis labels
    @type x_labels: C{list} of C{string}s

    @keyword x_units: This is a list of names that sets the individual
                      x axis units
    @type x_units: C{list} of C{string}s

    @keyword split: This flag causes the counts and the fractional area
                    to be written out into separate files.
    @type split: C{boolean}

    @keyword configure: This is the object containing the driver
                        configuration.
    @type configure: C{Configure}

    @return: Object containing a 2D C{SO} with E and Q axes
    @rtype: C{SOM.SOM}
    """
    import array_manip
    import axis_manip
    import common_lib
    import hlr_utils
    import nessi_list
    import SOM
    import utils

    # Check for keywords
    corner_angles = kwargs["corner_angles"]
    configure = kwargs.get("configure")
    split = kwargs.get("split", False)

    # Set up the output object
    so_dim = SOM.SO(2)
    so_dim.axis[0].val = Q_final
    so_dim.axis[1].val = som[0].axis[0].val # E_t

    # Calculate total 2D array size
    N_tot = (len(so_dim.axis[0].val) - 1) * (len(so_dim.axis[1].val) - 1)

    # Create y and var_y lists from total 2D size
    so_dim.y = nessi_list.NessiList(N_tot)
    so_dim.var_y = nessi_list.NessiList(N_tot)

    # Create area sum and errors for the area sum lists from total 2D size
    area_sum = nessi_list.NessiList(N_tot)
    area_sum_err2 = nessi_list.NessiList(N_tot)

    # Convert initial energy to initial wavevector
    l_i = common_lib.energy_to_wavelength(E_i)
    k_i = common_lib.wavelength_to_scalar_k(l_i)

    # Since all the data is rebinned to the same energy transfer axis, we
    # can calculate the final energy axis once
    E_t = som[0].axis[0].val
    if som[0].axis[0].var is not None:
        E_t_err2 = som[0].axis[0].var
    else:
        E_t_err2 = nessi_list.NessiList(len(E_t))

    # Get the bin width arrays from E_t
    (E_t_bw, E_t_bw_err2) = utils.calc_bin_widths(E_t)

    E_f = array_manip.sub_ncerr(E_i[0], E_i[1], E_t, E_t_err2)

    # Now we can get the final wavevector
    l_f = axis_manip.energy_to_wavelength(E_f[0], E_f[1])
    k_f = axis_manip.wavelength_to_scalar_k(l_f[0], l_f[1])

    # Output position for Q
    X = 0

    # Iterate through the data
    len_som = hlr_utils.get_length(som)
    for i in xrange(len_som):
        map_so = hlr_utils.get_map_so(som, None, i)

        yval = hlr_utils.get_value(som, i, "SOM", "y")
        yerr2 = hlr_utils.get_err2(som, i, "SOM", "y")

        cangles = corner_angles[str(map_so.id)]

        avg_theta1 = (cangles.getPolar(0) + cangles.getPolar(1)) / 2.0
        avg_theta2 = (cangles.getPolar(2) + cangles.getPolar(3)) / 2.0

        Q1 = axis_manip.init_scatt_wavevector_to_scalar_Q(k_i[0], k_i[1],
                                                          k_f[0][:-1],
                                                          k_f[1][:-1],
                                                          avg_theta2, 0.0)

        Q2 = axis_manip.init_scatt_wavevector_to_scalar_Q(k_i[0], k_i[1],
                                                          k_f[0][:-1],
                                                          k_f[1][:-1],
                                                          avg_theta1, 0.0)

        Q3 = axis_manip.init_scatt_wavevector_to_scalar_Q(k_i[0], k_i[1],
                                                          k_f[0][1:],
                                                          k_f[1][1:],
                                                          avg_theta1, 0.0)

        Q4 = axis_manip.init_scatt_wavevector_to_scalar_Q(k_i[0], k_i[1],
                                                          k_f[0][1:],
                                                          k_f[1][1:],
                                                          avg_theta2, 0.0)

        # Calculate the area of the E,Q polygons
        (A, A_err2) = utils.calc_eq_jacobian_dgs(E_t[:-1], E_t[:-1],
                                                 E_t[1:], E_t[1:],
                                                 Q1[X], Q2[X],
                                                 Q3[X], Q4[X])

        # Apply the Jacobian: C/dE_t * dE_t / A(EQ) = C/A(EQ)
        (jac_ratio, jac_ratio_err2) = array_manip.div_ncerr(E_t_bw,
                                                            E_t_bw_err2,
                                                            A, A_err2)
        (counts, counts_err2) = array_manip.mult_ncerr(yval, yerr2,
                                                       jac_ratio,
                                                       jac_ratio_err2)

        try:
            (y_2d, y_2d_err2,
             area_new,
             bin_count) = axis_manip.rebin_2D_quad_to_rectlin(Q1[X],
                                                              E_t[:-1],
                                                              Q2[X],
                                                              E_t[:-1],
                                                              Q3[X],
                                                              E_t[1:],
                                                              Q4[X],
                                                              E_t[1:],
                                                              counts,
                                                              counts_err2,
                                                        so_dim.axis[0].val,
                                                        so_dim.axis[1].val)
            del bin_count
        except IndexError, e:
            # Get the offending index from the error message
            index = int(str(e).split()[1].split('index')[-1].strip('[]'))
            print "Id:", map_so.id
            print "Index:", index
            print "Vertices: %f, %f, %f, %f, %f, %f, %f, %f" \
                  % (Q1[X][index], E_t[:-1][index],
                     Q2[X][index], E_t[:-1][index],
                     Q3[X][index], E_t[1:][index],
                     Q4[X][index], E_t[1:][index])
            raise IndexError(str(e))

        # Add together with previous results
        (so_dim.y, so_dim.var_y) = array_manip.add_ncerr(so_dim.y,
                                                         so_dim.var_y,
                                                         y_2d, y_2d_err2)

        (area_sum, area_sum_err2) = array_manip.add_ncerr(area_sum,
                                                          area_sum_err2,
                                                          area_new,
                                                          area_sum_err2)
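# --- Hedged illustration (not part of the original source) -----------------
# The Q-vertex construction above rests on standard neutron kinematics:
# E = h^2 / (2 m_n lambda^2), k = 2 pi / lambda, and
# Q^2 = k_i^2 + k_f^2 - 2 k_i k_f cos(polar).  The plain-Python sketch
# below mirrors the conversion chain that common_lib.energy_to_wavelength,
# wavelength_to_scalar_k and axis_manip.init_scatt_wavevector_to_scalar_Q
# perform on value/error pairs; 81.8042 meV*A^2 is the usual h^2/(2 m_n).
import math

def _energy_to_scalar_k(E_meV):
    """Return the neutron wavevector magnitude (1/Angstrom) for E in meV."""
    wavelength = math.sqrt(81.8042 / E_meV)    # Angstroms
    return 2.0 * math.pi / wavelength

def _scalar_Q(k_i, k_f, polar):
    """Return |Q| for incident/final wavevectors and a polar angle."""
    return math.sqrt(k_i * k_i + k_f * k_f
                     - 2.0 * k_i * k_f * math.cos(polar))

# Example: a 25 meV incident neutron gives k_i ~ 3.47 1/Angstrom, and
# elastic scattering through 90 degrees gives |Q| ~ 4.91 1/Angstrom.
# print _scalar_Q(_energy_to_scalar_k(25.0), _energy_to_scalar_k(25.0),
#                 math.pi / 2.0)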
def create_Qvec_vs_E_dgs(som, E_i, conf, **kwargs):
    """
    This function starts with the energy transfer axis from DGS reduction
    and turns this into a 4D spectra with Qx, Qy, Qz and Et axes.

    @param som: The input object with initial IGS wavelength axis
    @type som: C{SOM.SOM}

    @param E_i: The initial energy for the given data.
    @type E_i: C{tuple}

    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @keyword corner_angles: The object that contains the corner geometry
                            information.
    @type corner_angles: C{dict}

    @keyword make_fixed: A flag that turns on writing the fixed grid mesh
                         information to a file.
    @type make_fixed: C{boolean}

    @keyword output: The output filename and/or directory.
    @type output: C{string}
    """
    import array_manip
    import axis_manip
    import common_lib
    import hlr_utils

    import os

    # Check keywords
    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    corner_angles = kwargs["corner_angles"]

    try:
        make_fixed = kwargs["make_fixed"]
    except KeyError:
        make_fixed = False

    try:
        output = kwargs["output"]
    except KeyError:
        output = None

    # Convert initial energy to initial wavevector
    l_i = common_lib.energy_to_wavelength(E_i)
    k_i = common_lib.wavelength_to_scalar_k(l_i)

    # Since all the data is rebinned to the same energy transfer axis, we
    # can calculate the final energy axis once
    E_t = som[0].axis[0].val
    if som[0].axis[0].var is not None:
        E_t_err2 = som[0].axis[0].var
    else:
        import nessi_list
        E_t_err2 = nessi_list.NessiList(len(E_t))

    E_f = array_manip.sub_ncerr(E_i[0], E_i[1], E_t, E_t_err2)

    # Check for negative final energies which will cause problems with
    # the wavelength conversion due to the square root
    if E_f[0][-1] < 0:
        E_f[0].reverse()
        E_f[1].reverse()
        index = 0
        for E in E_f[0]:
            if E >= 0:
                break
            index += 1
        E_f[0].__delslice__(0, index)
        E_f[1].__delslice__(0, index)
        E_f[0].reverse()
        E_f[1].reverse()

    len_E = len(E_f[0]) - 1

    # Now we can get the final wavevector
    l_f = axis_manip.energy_to_wavelength(E_f[0], E_f[1])
    k_f = axis_manip.wavelength_to_scalar_k(l_f[0], l_f[1])

    # Grab the instrument from the som
    inst = som.attr_list.instrument

    if make_fixed:
        import SOM
        fixed_grid = {}
        for key in corner_angles:
            so_id = SOM.NeXusId.fromString(key).toTuple()
            try:
                pathlength = inst.get_secondary(so_id)[0]
                points = []
                for j in range(4):
                    points.extend(__calc_xyz(pathlength,
                                        corner_angles[key].getPolar(j),
                                        corner_angles[key].getAzimuthal(j)))
                fixed_grid[key] = points
            except KeyError:
                # Pixel ID is not in the instrument geometry
                pass

    CNT = {}
    ERR2 = {}
    V1 = {}
    V2 = {}
    V3 = {}
    V4 = {}

    # Output positions for the Qx, Qy, Qz coordinates
    X = 0
    Y = 2
    Z = 4

    if t is not None:
        t.getTime(False)

    # Iterate through the data
    len_som = hlr_utils.get_length(som)
    for i in xrange(len_som):
        map_so = hlr_utils.get_map_so(som, None, i)

        yval = hlr_utils.get_value(som, i, "SOM", "y")
        yerr2 = hlr_utils.get_err2(som, i, "SOM", "y")

        CNT[str(map_so.id)] = yval
        ERR2[str(map_so.id)] = yerr2

        cangles = corner_angles[str(map_so.id)]

        Q1 = axis_manip.init_scatt_wavevector_to_Q(k_i[0], k_i[1],
                                                   k_f[0], k_f[1],
                                                   cangles.getAzimuthal(0),
                                                   0.0,
                                                   cangles.getPolar(0),
                                                   0.0)
        V1[str(map_so.id)] = {}
        V1[str(map_so.id)]["x"] = Q1[X]
        V1[str(map_so.id)]["y"] = Q1[Y]
        V1[str(map_so.id)]["z"] = Q1[Z]

        Q2 = axis_manip.init_scatt_wavevector_to_Q(k_i[0], k_i[1],
                                                   k_f[0], k_f[1],
                                                   cangles.getAzimuthal(1),
                                                   0.0,
                                                   cangles.getPolar(1),
                                                   0.0)
        V2[str(map_so.id)] = {}
        V2[str(map_so.id)]["x"] = Q2[X]
        V2[str(map_so.id)]["y"] = Q2[Y]
        V2[str(map_so.id)]["z"] = Q2[Z]

        Q3 = axis_manip.init_scatt_wavevector_to_Q(k_i[0], k_i[1],
                                                   k_f[0], k_f[1],
                                                   cangles.getAzimuthal(2),
                                                   0.0,
                                                   cangles.getPolar(2),
                                                   0.0)
        V3[str(map_so.id)] = {}
        V3[str(map_so.id)]["x"] = Q3[X]
        V3[str(map_so.id)]["y"] = Q3[Y]
        V3[str(map_so.id)]["z"] = Q3[Z]

        Q4 = axis_manip.init_scatt_wavevector_to_Q(k_i[0], k_i[1],
                                                   k_f[0], k_f[1],
                                                   cangles.getAzimuthal(3),
                                                   0.0,
                                                   cangles.getPolar(3),
                                                   0.0)
        V4[str(map_so.id)] = {}
        V4[str(map_so.id)]["x"] = Q4[X]
        V4[str(map_so.id)]["y"] = Q4[Y]
        V4[str(map_so.id)]["z"] = Q4[Z]

    if t is not None:
        t.getTime(msg="After calculating vertices ")

    # Form the messages
    if t is not None:
        t.getTime(False)

    jobstr = 'MR' + hlr_utils.create_binner_string(conf) + 'JH'
    num_lines = len(CNT) * len_E
    linestr = str(num_lines)

    if output is not None:
        outdir = os.path.dirname(output)
        if outdir != '':
            if outdir.rfind('.') != -1:
                outdir = ""
    else:
        outdir = ""

    value = str(som.attr_list["data-run_number"].getValue()).split('/')

    topdir = os.path.join(outdir, value[0].strip() + "-mesh")
    try:
        os.mkdir(topdir)
    except OSError:
        pass

    outtag = os.path.basename(output)
    if outtag.rfind('.') == -1:
        outtag = ""
    else:
        outtag = outtag.split('.')[0]

    if outtag != "":
        filehead = outtag + "_bmesh"
        if make_fixed:
            filehead1 = outtag + "_fgrid"
        filehead2 = outtag + "_conf"
    else:
        filehead = "bmesh"
        if make_fixed:
            filehead1 = "fgrid"
        filehead2 = "conf"

    hfile = open(os.path.join(topdir, "%s.in" % filehead2), "w")
    print >> hfile, jobstr
    print >> hfile, linestr
    hfile.close()

    import utils
    use_zero_supp = not conf.no_zero_supp

    for k in xrange(len_E):
        ofile = open(os.path.join(topdir, "%s%04d.in" % (filehead, k)), "w")
        if make_fixed:
            ofile1 = open(os.path.join(topdir, "%s%04d.in" % (filehead1,
                                                              k)), "w")
        for pid in CNT:
            if use_zero_supp:
                write_value = not utils.compare(CNT[pid][k], 0.0) == 0
            else:
                write_value = True

            if write_value:
                result = []
                result.append(str(k))
                result.append(str(E_t[k]))
                result.append(str(E_t[k+1]))
                result.append(str(CNT[pid][k]))
                result.append(str(ERR2[pid][k]))
                __get_coords(V1, pid, k, result)
                __get_coords(V2, pid, k, result)
                __get_coords(V3, pid, k, result)
                __get_coords(V4, pid, k, result)
                __get_coords(V1, pid, k+1, result)
                __get_coords(V2, pid, k+1, result)
                __get_coords(V3, pid, k+1, result)
                __get_coords(V4, pid, k+1, result)

                print >> ofile, " ".join(result)

                if make_fixed:
                    result1 = []
                    result1.append(str(k))
                    result1.append(str(E_t[k]))
                    result1.append(str(E_t[k+1]))
                    result1.append(str(CNT[pid][k]))
                    result1.append(str(ERR2[pid][k]))
                    result1.extend([str(x) for x in fixed_grid[pid]])

                    print >> ofile1, " ".join(result1)

        ofile.close()
        if make_fixed:
            ofile1.close()

    if t is not None:
        t.getTime(msg="After creating messages ")
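# --- Hedged reconstructions (not shown in the original source) -------------
# create_Qvec_vs_E_dgs() calls two module-private helpers that are not
# reproduced in this section.  Their behavior can be inferred from the
# call sites; the versions below are illustrative sketches, assuming
# __calc_xyz performs a standard spherical-to-Cartesian conversion and
# __get_coords appends one vertex's coordinate triple to an output line.
import math

def __calc_xyz(pathlength, polar, azimuthal):
    """Return [x, y, z] for a point at the given distance and angles."""
    x = pathlength * math.sin(polar) * math.cos(azimuthal)
    y = pathlength * math.sin(polar) * math.sin(azimuthal)
    z = pathlength * math.cos(polar)
    return [x, y, z]

def __get_coords(vertex, pid, index, result):
    """Append the (x, y, z) values at index for pixel pid to result."""
    result.append(str(vertex[pid]["x"][index]))
    result.append(str(vertex[pid]["y"][index]))
    result.append(str(vertex[pid]["z"][index]))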
def process_dgs_data(obj, conf, bcan, ecan, tcoeff, **kwargs):
    """
    This function combines Steps 7 through 16 in Section 2.1.1 of the data
    reduction process for Direct Geometry Spectrometers as specified by
    the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}.
    The function takes a calibrated dataset, a L{hlr_utils.Configure}
    object and processes the data accordingly.

    @param obj: A calibrated dataset object.
    @type obj: C{SOM.SOM}

    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}

    @param bcan: The object containing the black can data.
    @type bcan: C{SOM.SOM}

    @param ecan: The object containing the empty can data.
    @type ecan: C{SOM.SOM}

    @param tcoeff: The transmission coefficient appropriate to the given
                   data set.
    @type tcoeff: C{tuple}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword dataset_type: The practical name of the dataset being
                           processed. The default value is I{data}.
    @type dataset_type: C{string}

    @keyword cwp_used: A flag signalling the use of the chopper phase
                       corrections.
    @type cwp_used: C{bool}

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import array_manip
    import common_lib
    import dr_lib
    import hlr_utils

    # Check keywords
    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    cwp_used = kwargs.get("cwp_used", False)

    if conf.verbose:
        print "Processing %s information" % dataset_type

    # Step 7: Create black can background contribution
    if bcan is not None:
        if conf.verbose:
            print "Creating black can background contribution for %s" \
                  % dataset_type

        if t is not None:
            t.getTime(False)

        bccoeff = array_manip.sub_ncerr(1.0, 0.0, tcoeff[0], tcoeff[1])
        bcan1 = common_lib.mult_ncerr(bcan, bccoeff)

        if t is not None:
            t.getTime(msg="After creating black can background contribution ")

        del bcan
    else:
        bcan1 = None

    # Step 8: Create empty can background contribution
    if ecan is not None:
        if conf.verbose:
            print "Creating empty can background contribution for %s" \
                  % dataset_type

        if t is not None:
            t.getTime(False)

        ecan1 = common_lib.mult_ncerr(ecan, tcoeff)

        if t is not None:
            t.getTime(msg="After creating empty can background contribution ")

        del ecan
    else:
        ecan1 = None

    # Step 9: Create background spectra
    if (bcan1 is not None or ecan1 is not None) and conf.verbose:
        print "Creating background spectra for %s" % dataset_type

    if bcan1 is not None and ecan1 is not None:
        if cwp_used:
            if conf.verbose:
                print "Rebinning empty can to black can axis."

            ecan2 = common_lib.rebin_axis_1D_frac(ecan1,
                                                  bcan1[0].axis[0].val)
        else:
            ecan2 = ecan1

        del ecan1

        if t is not None:
            t.getTime(False)

        b_som = common_lib.add_ncerr(bcan1, ecan2)

        if t is not None:
            t.getTime(msg="After creating background spectra ")
    elif bcan1 is not None and ecan1 is None:
        b_som = bcan1
    elif bcan1 is None and ecan1 is not None:
        b_som = ecan1
    else:
        b_som = None

    del bcan1, ecan1

    if cwp_used:
        if conf.verbose:
            print "Rebinning background spectra to %s" % dataset_type

        b_som1 = common_lib.rebin_axis_1D_frac(b_som, obj[0].axis[0].val)
    else:
        b_som1 = b_som

    del b_som

    if conf.dump_ctof_comb and b_som1 is not None:
        b_som_1 = dr_lib.sum_all_spectra(b_som1)
        hlr_utils.write_file(conf.output, "text/Spec", b_som_1,
                             output_ext="ctof",
                             extra_tag="background",
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             verbose=conf.verbose,
                             message="combined background TOF information")
        del b_som_1

    # Step 10: Subtract background from data
    obj1 = dr_lib.subtract_bkg_from_data(obj, b_som1,
                                         verbose=conf.verbose,
                                         timer=t,
                                         dataset1=dataset_type,
                                         dataset2="background")

    del obj, b_som1

    # Step 11: Calculate initial velocity
    if conf.verbose:
        print "Calculating initial velocity"

    if t is not None:
        t.getTime(False)

    if conf.initial_energy is not None:
        initial_wavelength = common_lib.energy_to_wavelength(\
            conf.initial_energy.toValErrTuple())
        initial_velocity = common_lib.wavelength_to_velocity(\
            initial_wavelength)
    else:
        # This should actually calculate it, but don't have a way right now
        pass

    if t is not None:
        t.getTime(msg="After calculating initial velocity ")

    # Step 12: Calculate the time-zero offset
    if conf.time_zero_offset is not None:
        time_zero_offset = conf.time_zero_offset.toValErrTuple()
    else:
        # This should actually calculate it, but don't have a way right now
        time_zero_offset = (0.0, 0.0)

    # Step 13: Convert time-of-flight to final velocity
    if conf.verbose:
        print "Converting TOF to final velocity DGS"

    if t is not None:
        t.getTime(False)

    obj2 = common_lib.tof_to_final_velocity_dgs(obj1, initial_velocity,
                                                time_zero_offset,
                                                units="microsecond")

    if t is not None:
        t.getTime(msg="After calculating TOF to final velocity DGS ")

    del obj1

    # Step 14: Convert final velocity to final wavelength
    if conf.verbose:
        print "Converting final velocity DGS to final wavelength"

    if t is not None:
        t.getTime(False)

    obj3 = common_lib.velocity_to_wavelength(obj2)

    if t is not None:
        t.getTime(msg="After calculating velocity to wavelength ")

    del obj2

    if conf.dump_wave_comb:
        obj3_1 = dr_lib.sum_all_spectra(obj3,
                                    rebin_axis=conf.lambda_bins.toNessiList())
        hlr_utils.write_file(conf.output, "text/Spec", obj3_1,
                             output_ext="fwv",
                             extra_tag=dataset_type,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             verbose=conf.verbose,
                             message="combined final wavelength information")
        del obj3_1

    # Step 15: Create the detector efficiency
    if conf.det_eff is not None:
        if conf.verbose:
            print "Creating detector efficiency spectra"

        if t is not None:
            t.getTime(False)

        det_eff = dr_lib.create_det_eff(obj3)

        if t is not None:
            t.getTime(msg="After creating detector efficiency spectra ")
    else:
        det_eff = None

    # Step 16: Divide the detector pixel spectra by the detector efficiency
    if det_eff is not None:
        if conf.verbose:
            print "Correcting %s for detector efficiency" % dataset_type

        if t is not None:
            t.getTime(False)

        obj4 = common_lib.div_ncerr(obj3, det_eff)

        if t is not None:
            t.getTime(msg="After correcting %s for detector efficiency" \
                      % dataset_type)
    else:
        obj4 = obj3

    del obj3, det_eff

    return obj4
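# --- Hedged illustration (not part of the original source) -----------------
# Steps 7-9 weight the two container measurements by the transmission
# coefficient T: the black can is scaled by (1 - T), the empty can by T,
# and the two are summed.  The sketch below shows the same value /
# error-squared arithmetic on plain floats that sub_ncerr, mult_ncerr and
# add_ncerr apply elementwise to the spectra.
def _combine_can_backgrounds(bcan, bcan_err2, ecan, ecan_err2, T, T_err2):
    """Return (background, err2) for one channel from both can spectra."""
    # (1 - T): subtracting from a constant leaves the err2 unchanged
    one_m_T = 1.0 - T
    one_m_T_err2 = T_err2
    # black can contribution: b * (1 - T)
    b = bcan * one_m_T
    b_err2 = bcan_err2 * one_m_T**2 + one_m_T_err2 * bcan**2
    # empty can contribution: e * T
    e = ecan * T
    e_err2 = ecan_err2 * T**2 + T_err2 * ecan**2
    # summed background: squared errors add
    return (b + e, b_err2 + e_err2)

# Example: print _combine_can_backgrounds(10.0, 1.0, 4.0, 0.5, 0.9, 0.0)
# gives (4.6, 0.415) for T = 0.9 with no error on T.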
def calibrate_dgs_data(datalist, conf, dkcur, **kwargs):
    """
    This function combines Steps 3 through 6 in Section 2.1.1 of the data
    reduction process for Direct Geometry Spectrometers as specified by
    the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}.
    The function takes a list of file names, a L{hlr_utils.Configure}
    object and processes the data accordingly.

    @param datalist: A list containing the filenames of the data to be
                     processed.
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}

    @param dkcur: The object containing the TOF dark current data.
    @type dkcur: C{SOM.SOM}

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}

    @keyword tib_const: A time-independent background constant to
                        subtract from every pixel.
    @type tib_const: L{hlr_utils.DrParameter}

    @keyword dataset_type: The practical name of the dataset being
                           processed. The default value is I{data}.
    @type dataset_type: C{string}

    @keyword cwp: A list of chopper phase corrections in units of
                  microseconds.
    @type cwp: C{list} of C{float}s

    @keyword timer: Timing object so the function can perform timing
                    estimates.
    @type timer: C{sns_timer.DiffTime}

    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import common_lib
    import dr_lib
    import hlr_utils

    # Check keywords
    try:
        tib_const = kwargs["tib_const"]
    except KeyError:
        tib_const = None

    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    dataset_cwp = kwargs.get("cwp")

    # Open the appropriate datafiles
    if conf.verbose:
        print "Reading %s file" % dataset_type

    data_paths = conf.data_paths.toPath()
    if conf.no_mon_norm:
        mon_paths = None
    else:
        mon_paths = conf.usmon_path.toPath()

    # Check for the mask file since the normalization driver doesn't
    # understand this option
    try:
        mask_file = conf.mask_file
    except AttributeError:
        mask_file = None

    if t is not None:
        oldtime = t.getOldTime()

    (dp_som0, dm_som0) = dr_lib.add_files_dm(datalist,
                                             Data_Paths=data_paths,
                                             Mon_Paths=mon_paths,
                                             SO_Axis=conf.so_axis,
                                             Signal_ROI=conf.roi_file,
                                             Signal_MASK=mask_file,
                                             dataset_type=dataset_type,
                                             dataset_cwp=dataset_cwp,
                                             Verbose=conf.verbose,
                                             Timer=t)

    if t is not None:
        t.setOldTime(oldtime)
        t.getTime(msg="After reading %s file" % dataset_type)

    # Cut the spectra if necessary
    dp_somA = dr_lib.cut_spectra(dp_som0, conf.tof_cut_min,
                                 conf.tof_cut_max)

    del dp_som0

    dp_somB = dr_lib.fix_bin_contents(dp_somA)

    del dp_somA

    if dp_somB.attr_list.instrument.get_name() != "CNCS":
        if conf.verbose:
            print "Cutting spectrum at minimum TOF"

        if t is not None:
            t.getTime(False)

        # Calculate the minimum TOF for physical neutrons
        if conf.initial_energy is not None:
            initial_wavelength = common_lib.energy_to_wavelength(\
                conf.initial_energy.toValErrTuple())
            initial_velocity = common_lib.wavelength_to_velocity(\
                initial_wavelength)
        else:
            # This should actually calculate it, but don't have a way
            # right now
            pass

        if conf.time_zero_offset is not None:
            time_zero_offset = conf.time_zero_offset.toValErrTuple()
        else:
            # This should actually calculate it, but don't have a way
            # right now
            time_zero_offset = (0.0, 0.0)

        ss_length = dp_somB.attr_list.instrument.get_primary()

        tof_min = (ss_length[0] / initial_velocity[0]) + time_zero_offset[0]

        # Cut all spectra at the minimum TOF
        dp_som1 = dr_lib.cut_spectra(dp_somB, tof_min, None)

        if t is not None:
            t.getTime(msg="After cutting spectrum at minimum TOF ")
    else:
        dp_som1 = dp_somB

    del dp_somB

    if dm_som0 is not None:
        dm_som1 = dr_lib.fix_bin_contents(dm_som0)
    else:
        dm_som1 = dm_som0

    del dm_som0

    # Override geometry if necessary
    if conf.inst_geom is not None:
        i_geom_dst.setGeometry(data_paths, dp_som1)

    if conf.inst_geom is not None and dm_som1 is not None:
        i_geom_dst.setGeometry(mon_paths, dm_som1)

    # Step 3: Integrate the upstream monitor
    if dm_som1 is not None:
        if conf.verbose:
            print "Integrating upstream monitor spectrum"

        if t is not None:
            t.getTime(False)

        if conf.mon_int_range is None:
            start_val = float("inf")
            end_val = float("inf")
        else:
            start_val = conf.mon_int_range[0]
            end_val = conf.mon_int_range[1]

        dm_som2 = dr_lib.integrate_spectra(dm_som1, start=start_val,
                                           end=end_val, width=True)

        if t is not None:
            t.getTime(msg="After integrating upstream monitor spectrum ")
    else:
        dm_som2 = dm_som1

    del dm_som1

    tib_norm_const = None

    # Step 4: Divide data set by the summed monitor spectrum
    if dm_som2 is not None:
        if conf.verbose:
            print "Normalizing %s by monitor sum" % dataset_type

        if t is not None:
            t.getTime(False)

        dp_som2 = common_lib.div_ncerr(dp_som1, dm_som2,
                                       length_one_som=True)

        tib_norm_const = dm_som2[0].y

        if t is not None:
            t.getTime(msg="After normalizing %s by monitor sum" \
                      % dataset_type)
    elif conf.pc_norm:
        if conf.verbose:
            print "Normalizing %s by proton charge" % dataset_type

        pc_tag = dataset_type + "-proton_charge"
        pc = dp_som1.attr_list[pc_tag]

        # Scale the proton charge and then set the scaled PC back into
        # the attributes
        if conf.scale_pc is not None:
            if conf.verbose:
                print "Scaling %s proton charge" % dataset_type

            pc = hlr_utils.scale_proton_charge(pc, conf.scale_pc)
            dp_som1.attr_list[pc_tag] = pc

        tib_norm_const = pc.getValue()

        if t is not None:
            t.getTime(False)

        dp_som2 = common_lib.div_ncerr(dp_som1, (pc.getValue(), 0.0))

        if t is not None:
            t.getTime(msg="After normalizing %s by proton charge" \
                      % dataset_type)
    else:
        dp_som2 = dp_som1

    del dp_som1, dm_som2

    # Step 5: Scale dark current by the data set measurement time
    if dkcur is not None:
        if conf.verbose:
            print "Scaling dark current by %s acquisition time" \
                  % dataset_type

        if t is not None:
            t.getTime(False)

        dstime_tag = dataset_type + "-duration"
        dstime = dp_som2.attr_list[dstime_tag]

        dkcur1 = common_lib.div_ncerr(dkcur, (dstime.getValue(), 0.0))

        if t is not None:
            t.getTime(msg="After scaling dark current by %s acquisition time" \
                      % dataset_type)
    else:
        dkcur1 = dkcur

    del dkcur

    # Step 6: Subtract the scaled dark current from the data set
    if dkcur1 is not None:
        if conf.verbose:
            print "Subtracting %s by scaled dark current" % dataset_type

        if t is not None:
            t.getTime(False)

        dp_som3 = common_lib.sub_ncerr(dp_som2, dkcur1)

        if t is not None:
            t.getTime(msg="After subtracting %s by scaled dark current" \
                      % dataset_type)
    elif tib_const is not None and dkcur1 is None:
        if conf.verbose:
            print "Subtracting TIB constant from %s" % dataset_type

        # Normalize the TIB constant by dividing by the current
        # normalization, the duration (if necessary) and the conversion
        # from seconds to microseconds
        tib_c = tib_const.toValErrTuple()

        conv_sec_to_usec = 1.0e-6

        if tib_norm_const is None:
            tib_norm_const = 1
            duration = 1
        else:
            duration_tag = dataset_type + "-duration"
            duration = dp_som2.attr_list[duration_tag].getValue()

        norm_const = (duration * conv_sec_to_usec) / tib_norm_const

        tib_val = tib_c[0] * norm_const
        tib_err2 = tib_c[1] * (norm_const * norm_const)

        if t is not None:
            t.getTime(False)

        dp_som3 = common_lib.sub_ncerr(dp_som2, (tib_val, tib_err2))

        if t is not None:
            t.getTime(msg="After subtracting TIB constant from %s" \
                      % dataset_type)
    elif conf.tib_range is not None and dkcur1 is None:
        if conf.verbose:
            print "Determining TIB constant from %s" % dataset_type

        if t is not None:
            t.getTime(False)

        TIB = dr_lib.determine_time_indep_bkg(dp_som2, conf.tib_range,
                                              is_range=True)

        if t is not None:
            t.getTime(msg="After determining TIB constant from %s" \
                      % dataset_type)

        if conf.dump_tib:
            file_comment = "TIB TOF Range: [%d, %d]" \
                           % (conf.tib_range[0], conf.tib_range[1])

            hlr_utils.write_file(conf.output, "text/num-info", TIB,
                                 output_ext="tib",
                                 extra_tag=dataset_type,
                                 verbose=conf.verbose,
                                 data_ext=conf.ext_replacement,
                                 path_replacement=conf.path_replacement,
                                 message="time-independent background "\
                                 +"information",
                                 tag="Average TIB",
                                 units="counts/usec",
                                 comments=[file_comment])

        if conf.verbose:
            print "Subtracting TIB constant from %s" % dataset_type

        if t is not None:
            t.getTime(False)

        dp_som3 = common_lib.sub_ncerr(dp_som2, TIB)

        if t is not None:
            t.getTime(msg="After subtracting TIB constant from %s" \
                      % dataset_type)

        del TIB
    else:
        dp_som3 = dp_som2

    del dp_som2, dkcur1

    if conf.dump_ctof_comb:
        dp_som3_1 = dr_lib.sum_all_spectra(dp_som3)
        hlr_utils.write_file(conf.output, "text/Spec", dp_som3_1,
                             output_ext="ctof",
                             extra_tag=dataset_type,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             verbose=conf.verbose,
                             message="combined calibrated TOF information")
        del dp_som3_1

    return dp_som3
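# --- Hedged illustration (not part of the original source) -----------------
# The TIB branch of Step 6 rescales the counts/usec background constant
# into the units of the monitor- or proton-charge-normalized data:
# norm_const = duration[s] * 1.0e-6 / normalization, applied once to the
# value and squared for the error.  A small numeric sketch:
def _scale_tib(tib_val, tib_err2, duration, norm):
    """Return the TIB (value, err2) scaled for subtraction."""
    norm_const = (duration * 1.0e-6) / norm
    return (tib_val * norm_const, tib_err2 * norm_const * norm_const)

# Example: a 3 counts/usec constant over a 3600 s run normalized by a
# monitor sum of 1.0e6 becomes a subtraction of 1.08e-8 per channel.
# print _scale_tib(3.0, 0.1, 3600.0, 1.0e6)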