Example #1
def process_reflp_data(datalist,
                       conf,
                       roi_file,
                       bkg_roi_file=None,
                       no_bkg=False,
                       **kwargs):
    """
    This function combines Steps 1 through 3 in section 2.4.6.1 of the data
    reduction process for Reduction from TOF to lambda_T as specified by
    the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object, a
    region-of-interest (ROI) file for the normalization dataset, a background
    region-of-interest (ROI) file and an optional flag about background
    subtraction, and processes the data accordingly.

    @param datalist: The filenames of the data to be processed
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver
    @type conf: L{hlr_utils.Configure}

    @param roi_file: The file containing the list of pixel IDs for the region
                     of interest. This only applies to normalization data. 
    @type roi_file: C{string}

    @param bkg_roi_file: The file containing the list of pixel IDs for the
                         (possible) background region of interest.
    @type bkg_roi_file: C{string}    
    
    @param no_bkg: (OPTIONAL) Flag which determines if the background will be
                              calculated and subtracted.
    @type no_bkg: C{boolean}    

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}

    @keyword timer:  Timing object so the function can perform timing
                     estimates.
    @type timer: C{sns_timer.DiffTime}


    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import hlr_utils
    import common_lib
    import dr_lib

    # Check keywords
    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    if roi_file is not None:
        # Normalization
        dataset_type = "norm"
    else:
        # Sample data
        dataset_type = "data"

    so_axis = "time_of_flight"

    # Step 0: Open data files and select ROI (if necessary)
    if conf.verbose:
        print "Reading %s file" % dataset_type

    if len(conf.norm_data_paths) and dataset_type == "norm":
        data_path = conf.norm_data_paths.toPath()
    else:
        data_path = conf.data_paths.toPath()

    (d_som1, b_som1) = dr_lib.add_files_bg(datalist,
                                           Data_Paths=data_path,
                                           SO_Axis=so_axis,
                                           dataset_type=dataset_type,
                                           Signal_ROI=roi_file,
                                           Bkg_ROI=bkg_roi_file,
                                           Verbose=conf.verbose,
                                           Timer=t)

    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    # Override geometry if necessary
    if i_geom_dst is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), d_som1)

    if dataset_type == "data":
        # Get TOF bin width
        conf.delta_TOF = d_som1[0].axis[0].val[1] - d_som1[0].axis[0].val[0]

    if conf.mon_norm:
        if conf.verbose:
            print "Reading in monitor data from %s file" % dataset_type

        # The [0] is to get the data SOM and ignore the None background SOM
        dm_som1 = dr_lib.add_files(datalist,
                                   Data_Paths=conf.mon_path.toPath(),
                                   SO_Axis=so_axis,
                                   dataset_type=dataset_type,
                                   Verbose=conf.verbose,
                                   Timer=t)

        if t is not None:
            t.getTime(msg="After reading monitor data ")

    else:
        dm_som1 = None

    # Step 1: Sum all spectra along the low resolution direction
    # Set sorting for REF_L
    if conf.verbose:
        print "Summing over low resolution direction"

    # Set sorting
    (y_sort, cent_pixel) = hlr_utils.get_ref_integration_direction(
        conf.int_dir, conf.inst, d_som1.attr_list.instrument)

    if t is not None:
        t.getTime(False)

    d_som2 = dr_lib.sum_all_spectra(d_som1,
                                    y_sort=y_sort,
                                    stripe=True,
                                    pixel_fix=cent_pixel)

    if b_som1 is not None:
        b_som2 = dr_lib.sum_all_spectra(b_som1,
                                        y_sort=y_sort,
                                        stripe=True,
                                        pixel_fix=cent_pixel)
        del b_som1
    else:
        b_som2 = b_som1

    if t is not None:
        t.getTime(msg="After summing low resolution direction ")

    del d_som1

    # Determine background spectrum
    if conf.verbose and not no_bkg:
        print "Determining %s background" % dataset_type

    if b_som2 is not None:
        B = dr_lib.calculate_ref_background(b_som2,
                                            no_bkg,
                                            conf.inst,
                                            None,
                                            aobj=d_som2)
    else:
        B = None

    if t is not None:
        t.getTime(msg="After background determination")

    # Subtract background spectrum from data spectra
    if not no_bkg:
        d_som3 = dr_lib.subtract_bkg_from_data(d_som2,
                                               B,
                                               verbose=conf.verbose,
                                               timer=t,
                                               dataset1="data",
                                               dataset2="background")
    else:
        d_som3 = d_som2

    del d_som2

    # Zero the spectra if necessary
    if roi_file is None and (conf.tof_cut_min is not None or \
                             conf.tof_cut_max is not None):
        import utils
        # Find the indices for the non-zero range
        if conf.tof_cut_min is None:
            conf.TOF_min = d_som3[0].axis[0].val[0]
            start_index = 0
        else:
            start_index = utils.bisect_helper(d_som3[0].axis[0].val,
                                              conf.tof_cut_min)

        if conf.tof_cut_max is None:
            conf.TOF_max = d_som3[0].axis[0].val[-1]
            end_index = len(d_som3[0].axis[0].val) - 1
        else:
            end_index = utils.bisect_helper(d_som3[0].axis[0].val,
                                            conf.tof_cut_max)

        nz_list = []
        for i in xrange(hlr_utils.get_length(d_som3)):
            nz_list.append((start_index, end_index))

        d_som4 = dr_lib.zero_spectra(d_som3, nz_list, use_bin_index=True)
    else:
        conf.TOF_min = d_som3[0].axis[0].val[0]
        conf.TOF_max = d_som3[0].axis[0].val[-1]
        d_som4 = d_som3

    del d_som3

    # Step N: Convert TOF to wavelength
    if conf.verbose:
        print "Converting TOF to wavelength"

    if t is not None:
        t.getTime(False)

    d_som5 = common_lib.tof_to_wavelength(d_som4,
                                          inst_param="total",
                                          units="microsecond")
    if dm_som1 is not None:
        dm_som2 = common_lib.tof_to_wavelength(dm_som1, units="microsecond")
    else:
        dm_som2 = None

    del dm_som1

    if t is not None:
        t.getTime(msg="After converting TOF to wavelength ")

    del d_som4

    if conf.mon_norm:
        dm_som3 = dr_lib.rebin_monitor(dm_som2, d_som5, rtype="frac")
    else:
        dm_som3 = None

    del dm_som2

    if not conf.mon_norm:
        # Step 2: Multiply the spectra by the proton charge
        if conf.verbose:
            print "Multiply spectra by proton charge"

        pc_tag = dataset_type + "-proton_charge"
        proton_charge = d_som5.attr_list[pc_tag]

        if t is not None:
            t.getTime(False)

        d_som6 = common_lib.div_ncerr(d_som5, (proton_charge.getValue(), 0.0))

        if t is not None:
            t.getTime(msg="After scaling by proton charge ")
    else:
        if conf.verbose:
            print "Normalize by monitor spectrum"

        if t is not None:
            t.getTime(False)

        d_som6 = common_lib.div_ncerr(d_som5, dm_som3)

        if t is not None:
            t.getTime(msg="After monitor normalization ")

    del d_som5, dm_som3

    if roi_file is None:
        return d_som6
    else:
        # Step 3: Make one spectrum for normalization dataset
        # Need to create a final rebinning axis
        pathlength = d_som6.attr_list.instrument.get_total_path(
            det_secondary=True)

        delta_lambda = common_lib.tof_to_wavelength((conf.delta_TOF, 0.0),
                                                    pathlength=pathlength)

        lambda_bins = dr_lib.create_axis_from_data(d_som6,
                                                   width=delta_lambda[0])

        return dr_lib.sum_by_rebin_frac(d_som6, lambda_bins.toNessiList())
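
A minimal usage sketch for process_reflp_data, assuming the SNS
dr_lib/hlr_utils stack is importable and that conf is an already populated
hlr_utils.Configure object; the run file and ROI file names below are
placeholders, not real datasets.

# Hypothetical driver sketch; file and ROI names are placeholders.
# Normalization run: the ROI file selects the pixels used for normalization.
norm_som = process_reflp_data(["REF_L_0001.nxs"], conf,
                              roi_file="norm_roi.dat",
                              bkg_roi_file="norm_bkg_roi.dat")
# Sample run: no ROI file, so the sample-data branch is taken, and
# no_bkg=True disables background subtraction.
data_som = process_reflp_data(["REF_L_0002.nxs"], conf,
                              roi_file=None,
                              no_bkg=True)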
Example #2
def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                           timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import common_lib
    import dr_lib
    import DST
    import hlr_utils
    
    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver "\
                           +"script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"
            
        inst_geom_dst = DST.getInstance("application/x-NxsGeom",
                                        config.inst_geom)
    else:
        inst_geom_dst = None

    only_background = False
    data_type = "transmission"
        
    # Perform Steps 1,6-7 or 1,3,5-7 on sample data
    d_som1 = dr_lib.process_sas_data(config.data, config, timer=tim,
                                     inst_geom_dst=inst_geom_dst,
                                     dataset_type=data_type,
                                     transmission=True,
                                     get_background=only_background)

    # Perform Steps 1,6-7 on background data
    if config.back is not None:
        b_som1 = dr_lib.process_sas_data(config.back, config, timer=tim,
                                         inst_geom_dst=inst_geom_dst,
                                         dataset_type="trans_bkg",
                                         transmission=True)
    else:
        b_som1 = None

    # Put the datasets on the same axis
    d_som2 = dr_lib.sum_by_rebin_frac(d_som1, config.lambda_bins.toNessiList())
    del d_som1

    if b_som1 is not None:
        b_som2 = dr_lib.sum_by_rebin_frac(b_som1,
                                          config.lambda_bins.toNessiList())
    else:
        b_som2 = None
        
    del b_som1    
    
    # Divide the data spectrum by the background spectrum
    if b_som2 is not None:
        d_som3 = common_lib.div_ncerr(d_som2, b_som2)
    else:
        d_som3 = d_som2

    del d_som2, b_som2

    # Reset y units to dimensionless for the transmission due to the ratio
    if config.back is not None:
        d_som3.setYLabel("Ratio")
        d_som3.setYUnits("")
        write_message = "transmission spectrum"
    else:
        write_message = "spectrum for background estimation"

    # Write out the transmission spectrum
    hlr_utils.write_file(config.output, "text/Spec", d_som3,
                         verbose=config.verbose,
                         replace_path=False,
                         replace_ext=False,
                         message=write_message)

    d_som3.attr_list["config"] = config

    hlr_utils.write_file(config.output, "text/rmd", d_som3,
                         output_ext="rmd",
                         data_ext=config.ext_replacement,         
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="metadata")

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")    
Example #3
def process_sas_data(datalist, conf, **kwargs):
    """
    This function combines Steps 1 through 9 of the data reduction process for
    Small-Angle Scattering (section 2.5.1) as specified by the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object and
    processes the data accordingly. This function should really only be used in
    the context of I{sas_reduction}.

    @param datalist: A list containing the filenames of the data to be
    processed.
    @type datalist: C{list} of C{string}s
    
    @param conf: Object that contains the current setup of the driver.
    @type conf: L{hlr_utils.Configure}
    
    @param kwargs: A list of keyword arguments that the function accepts:
    
    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}
    
    @keyword dataset_type: The practical name of the dataset being processed.
                           The default value is I{data}.
    @type dataset_type: C{string}

    @keyword trans_data: Alternate data for the transmission spectrum. This is
                         used in the absence of transmission monitors.
    @type trans_data: C{string}

    @keyword transmission: A flag that signals the function to stop after
                           doing the conversion from TOF to wavelength. The
                           default is I{False}.
    @type transmission: C{boolean}

    @keyword bkg_subtract: A list of coefficients that help determine the
                           wavelength dependent background subtraction.
    @type bkg_subtract: C{list}

    @keyword get_background: A flag that signals the function to convert the
                             main data to wavelength and exit before
                             normalizing to the beam monitor.
    @type get_background: C{boolean}

    @keyword acc_down_time: The information for the accelerator downtime.
    @type acc_down_time: C{tuple}

    @keyword bkg_scale: The scaling used for the axis dependent background
                        parameters.
    @type bkg_scale: C{float}

    @keyword timer: Timing object so the function can perform timing estimates.
    @type timer: C{sns_timer.DiffTime}


    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import common_lib
    import dr_lib
    import hlr_utils

    # Check keywords
    try:
        dataset_type = kwargs["dataset_type"]
    except KeyError:
        dataset_type = "data"

    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None

    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    try:
        transmission = kwargs["transmission"]
    except KeyError:
        transmission = False

    try:
        bkg_subtract = kwargs["bkg_subtract"]
    except KeyError:
        bkg_subtract = None

    try:
        trans_data = kwargs["trans_data"]
    except KeyError:
        trans_data = None

    try:
        get_background = kwargs["get_background"]
    except KeyError:
        get_background = False

    acc_down_time = kwargs.get("acc_down_time")
    bkg_scale = kwargs.get("bkg_scale")
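    # Note: the try/except KeyError blocks above and these kwargs.get() calls
    # are equivalent ways to read optional keywords; kwargs.get() simply
    # returns None when the keyword is absent.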

    # Add so_axis to Configure object
    conf.so_axis = "time_of_flight"

    # Step 0: Open appropriate data files

    # Data
    if conf.verbose:
        print "Reading %s file" % dataset_type

    # The [0] is to get the data SOM and ignore the None background SOM
    dp_som = dr_lib.add_files(datalist, Data_Paths=conf.data_paths.toPath(),
                              SO_Axis=conf.so_axis, Signal_ROI=conf.roi_file,
                              dataset_type=dataset_type,
                              Verbose=conf.verbose, Timer=t)
    
    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    dp_som1 = dr_lib.fix_bin_contents(dp_som)

    del dp_som

    if conf.inst_geom is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), dp_som1)

    if conf.dump_tof_r:
        dp_som1_1 = dr_lib.create_param_vs_Y(dp_som1, "radius", "param_array",
                                             conf.r_bins.toNessiList(),
                                             y_label="counts",
                                             y_units="counts / (usec * m)",
                                             x_labels=["Radius", "TOF"], 
                                             x_units=["m", "usec"])

        hlr_utils.write_file(conf.output, "text/Dave2d", dp_som1_1,
                             output_ext="tvr",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="TOF vs radius information")

        del dp_som1_1

    if conf.dump_tof_theta:
        dp_som1_1 = dr_lib.create_param_vs_Y(dp_som1, "polar", "param_array",
                                             conf.theta_bins.toNessiList(),
                                             y_label="counts",
                                             y_units="counts / (usec * rads)",
                                             x_labels=["Polar Angle", "TOF"], 
                                             x_units=["rads", "usec"])

        hlr_utils.write_file(conf.output, "text/Dave2d", dp_som1_1,
                             output_ext="tvt",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="TOF vs polar angle information")        

        del dp_som1_1
        
    # Beam monitor
    if not get_background:
        if conf.beammon_over is None:
            if conf.verbose:
                print "Reading in beam monitor data from %s file" \
                      % dataset_type

            # The [0] is to get the data SOM and ignore the None
            # background SOM
            dbm_som0 = dr_lib.add_files(datalist,
                                        Data_Paths=conf.bmon_path.toPath(),
                                        SO_Axis=conf.so_axis,
                                        dataset_type=dataset_type,
                                        Verbose=conf.verbose,
                                        Timer=t)

            if t is not None:
                t.getTime(msg="After reading beam monitor data ")

            if conf.inst_geom is not None:
                i_geom_dst.setGeometry(conf.bmon_path.toPath(), dbm_som0)
        else:
            if conf.verbose:
                print "Reading in vanadium data"

            dbm_som0 = dr_lib.add_files(datalist,
                                        Data_Paths=conf.data_paths.toPath(),
                                        Signal_ROI=conf.roi_file,
                                        SO_Axis=conf.so_axis,
                                        dataset_type=dataset_type,
                                        Verbose=conf.verbose,
                                        Timer=t)

            if t is not None:
                t.getTime(msg="After reading vanadium data ")

            if conf.inst_geom is not None:
                i_geom_dst.setGeometry(conf.data_paths.toPath(), dbm_som0)

        dbm_som1 = dr_lib.fix_bin_contents(dbm_som0)

        del dbm_som0
    else:
        dbm_som1 = None

    # Transmission monitor
    if trans_data is None:
        if conf.verbose:
            print "Reading in transmission monitor data from %s file" \
                  % dataset_type
        try:
            dtm_som0 = dr_lib.add_files(datalist,
                                        Data_Paths=conf.tmon_path.toPath(),
                                        SO_Axis=conf.so_axis,
                                        dataset_type=dataset_type,
                                        Verbose=conf.verbose,
                                        Timer=t)
            if t is not None:
                t.getTime(msg="After reading transmission monitor data ")

            if conf.inst_geom is not None:
                i_geom_dst.setGeometry(conf.tmon_path.toPath(), dtm_som0)

            dtm_som1 = dr_lib.fix_bin_contents(dtm_som0)
                
            del dtm_som0
        # Transmission monitor cannot be found
        except KeyError:
            if conf.verbose:
                print "Transmission monitor not found"
            dtm_som1 = None
    else:
        dtm_som1 = None

    # Note: time_zero_offset_det MUST be a tuple
    if conf.time_zero_offset_det is not None:
        dp_som1.attr_list["Time_zero_offset_det"] = \
                                     conf.time_zero_offset_det.toValErrTuple()
    # Note: time_zero_offset_mon MUST be a tuple
    if conf.time_zero_offset_mon is not None and not get_background and \
           conf.beammon_over is None:
        dbm_som1.attr_list["Time_zero_offset_mon"] = \
                                     conf.time_zero_offset_mon.toValErrTuple()
    if conf.beammon_over is not None:
        dbm_som1.attr_list["Time_zero_offset_det"] = \
                                     conf.time_zero_offset_det.toValErrTuple()
    if trans_data is None and dtm_som1 is not None:
        dtm_som1.attr_list["Time_zero_offset_mon"] = \
                                     conf.time_zero_offset_mon.toValErrTuple()

    # Step 1: Convert TOF to wavelength for data and monitor
    if conf.verbose:
        print "Converting TOF to wavelength"

    if t is not None:
        t.getTime(False)

    if not get_background:
        # Convert beam monitor
        if conf.beammon_over is None:
            dbm_som2 = common_lib.tof_to_wavelength_lin_time_zero(
                dbm_som1,
                units="microsecond",
                time_zero_offset=conf.time_zero_offset_mon.toValErrTuple())
        else:
            dbm_som2 = common_lib.tof_to_wavelength_lin_time_zero(
                dbm_som1,
                units="microsecond",
                time_zero_offset=conf.time_zero_offset_det.toValErrTuple(),
                inst_param="total")
    else:
        dbm_som2 = None

    # Convert detector pixels
    dp_som2 = common_lib.tof_to_wavelength_lin_time_zero(
        dp_som1,
        units="microsecond",
        time_zero_offset=conf.time_zero_offset_det.toValErrTuple(),
        inst_param="total")

    if get_background:
        return dp_som2

    if dtm_som1 is not None:
        # Convert transmission  monitor
        dtm_som2 = common_lib.tof_to_wavelength_lin_time_zero(
            dtm_som1,
            units="microsecond",
            time_zero_offset=conf.time_zero_offset_mon.toValErrTuple())
    else:
        dtm_som2 = dtm_som1
        
    if t is not None:
        t.getTime(msg="After converting TOF to wavelength ")

    del dp_som1, dbm_som1, dtm_som1

    if conf.verbose and (conf.lambda_low_cut is not None or \
                         conf.lambda_high_cut is not None):
        print "Cutting data spectra"

    if t is not None:
        t.getTime(False)

    dp_som3 = dr_lib.cut_spectra(dp_som2, conf.lambda_low_cut,
                                 conf.lambda_high_cut)

    if t is not None:
        t.getTime(msg="After cutting data spectra ")

    del dp_som2

    if conf.beammon_over is not None:
        dbm_som2 = dr_lib.cut_spectra(dbm_som2, conf.lambda_low_cut,
                                       conf.lambda_high_cut)
        
    if conf.dump_wave:
        hlr_utils.write_file(conf.output, "text/Spec", dp_som3,
                             output_ext="pxl",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="pixel wavelength information")
    if conf.dump_bmon_wave:
        if conf.beammon_over is None:
            hlr_utils.write_file(conf.output, "text/Spec", dbm_som2,
                                 output_ext="bmxl",
                                 extra_tag=dataset_type,
                                 verbose=conf.verbose,
                                 data_ext=conf.ext_replacement,
                                 path_replacement=conf.path_replacement,
                                 message="beam monitor wavelength information")
        else:
            
            dbm_som2_1 = dr_lib.sum_by_rebin_frac(dbm_som2,
                                               conf.lambda_bins.toNessiList())
            hlr_utils.write_file(conf.output, "text/Spec", dbm_som2_1,
                                 output_ext="bmxl",
                                 extra_tag=dataset_type,
                                 verbose=conf.verbose,
                                 data_ext=conf.ext_replacement,
                                 path_replacement=conf.path_replacement,
                                 message="beam monitor override wavelength "\
                                 +"information")
            del dbm_som2_1

    # Step 2: Subtract wavelength dependent background if necessary
    if conf.verbose and bkg_subtract is not None:
        print "Subtracting wavelength dependent background"
        
    if bkg_subtract is not None:
        if t is not None:
            t.getTime(False)

        duration = dp_som3.attr_list["%s-duration" % dataset_type]
        scale = duration.getValue() - acc_down_time[0]
            
        dp_som4 = dr_lib.subtract_axis_dep_bkg(dp_som3, bkg_subtract,
                                               old_scale=bkg_scale,
                                               new_scale=scale)

        if t is not None:
            t.getTime(msg="After subtracting wavelength dependent background ")
    else:
        dp_som4 = dp_som3

    del dp_som3

    # Step 3: Efficiency correct beam monitor
    if conf.verbose and conf.mon_effc:
        print "Efficiency correct beam monitor data"

    if t is not None:
        t.getTime(False)

    if conf.mon_effc:
        dbm_som3 = dr_lib.feff_correct_mon(dbm_som2, inst_name=conf.inst,
                                           eff_const=conf.mon_eff_const)
    else:
        dbm_som3 = dbm_som2

    if t is not None and conf.mon_effc:
        t.getTime(msg="After efficiency correcting beam monitor ")

    if conf.dump_bmon_effc and conf.mon_effc:   
        hlr_utils.write_file(conf.output, "text/Spec", dbm_som3,
                             output_ext="bmel",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="beam monitor wavelength information "\
                             +"(efficiency)")

    del dbm_som2

    # Step 4: Efficiency correct transmission monitor    
    if dtm_som2 is not None:
        if conf.verbose and conf.mon_effc:
            print "Efficiency correct transmission monitor data"

        if t is not None:
            t.getTime(False)

        if conf.mon_effc:
            dtm_som3 = dr_lib.feff_correct_mon(dtm_som2)
        else:
            dtm_som3 = dtm_som2
    else:
        dtm_som3 = dtm_som2            

    if t is not None and conf.mon_effc and dtm_som2 is not None:
        t.getTime(msg="After efficiency correcting beam monitor ")

    # Step 5: Efficiency correct detector pixels
    if conf.det_effc:
        if conf.verbose:
            print "Calculating detector efficiency"

        if t is not None:
            t.getTime(False)

        det_eff = dr_lib.create_det_eff(dp_som4, inst_name=conf.inst,
                                      eff_scale_const=conf.det_eff_scale_const,
                                      eff_atten_const=conf.det_eff_atten_const)

        if t is not None:
            t.getTime(msg="After calculating detector efficiency")

        if conf.verbose:
            print "Applying detector efficiency"

        if t is not None:
            t.getTime(False)

        dp_som5 = common_lib.div_ncerr(dp_som4, det_eff)

        if t is not None:
            t.getTime(msg="After spplying detector efficiency")

    else:
        dp_som5 = dp_som4

    del dp_som4

    # Step 6: Rebin beam monitor axis onto detector pixel axis
    if conf.beammon_over is None:
        if not conf.no_bmon_norm:
            if conf.verbose:
                print "Rebin beam monitor axis to detector pixel axis"

            if t is not None:
                t.getTime(False)

            dbm_som4 = dr_lib.rebin_monitor(dbm_som3, dp_som5, rtype="frac")

            if t is not None:
                t.getTime(msg="After rebinning beam monitor ")
        else:
            dbm_som4 = dbm_som3
    else:
        dbm_som4 = dbm_som3

    del dbm_som3

    if conf.dump_bmon_rebin:
        hlr_utils.write_file(conf.output, "text/Spec", dbm_som4,
                             output_ext="bmrl",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="beam monitor wavelength information "\
                             +"(rebinned)")

    # Step 7: Normalize data by beam monitor
    if not conf.no_bmon_norm:
        if conf.verbose:
            print "Normalizing data by beam monitor"

        if t is not None:
            t.getTime(False)

        dp_som6 = common_lib.div_ncerr(dp_som5, dbm_som4)

        if t is not None:
            t.getTime(msg="After normalizing data by beam monitor ")
    else:
        dp_som6 = dp_som5

    del dp_som5

    if transmission:
        return dp_som6

    if conf.dump_wave_bmnorm:
        dp_som6_1 = dr_lib.sum_by_rebin_frac(dp_som6,
                                             conf.lambda_bins.toNessiList())

        write_message = "combined pixel wavelength information"
        write_message += " (beam monitor normalized)"
        
        hlr_utils.write_file(conf.output, "text/Spec", dp_som6_1,
                             output_ext="pbml",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message=write_message)
        del dp_som6_1

    if conf.dump_wave_r:
        dp_som6_1 = dr_lib.create_param_vs_Y(dp_som6, "radius", "param_array",
                                   conf.r_bins.toNessiList(),
                                   rebin_axis=conf.lambda_bins.toNessiList(),
                                   y_label="counts",
                                   y_units="counts / (Angstrom * m)",
                                   x_labels=["Radius", "Wavelength"], 
                                   x_units=["m", "Angstrom"])

        hlr_utils.write_file(conf.output, "text/Dave2d", dp_som6_1,
                             output_ext="lvr",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="wavelength vs radius information")

        del dp_som6_1

    if conf.dump_wave_theta:
        dp_som6_1 = dr_lib.create_param_vs_Y(dp_som6, "polar", "param_array",
                                   conf.theta_bins.toNessiList(),
                                   rebin_axis=conf.lambda_bins.toNessiList(),
                                   y_label="counts",
                                   y_units="counts / (Angstrom * rads)",
                                   x_labels=["Polar Angle", "Wavelength"], 
                                   x_units=["rads", "Angstrom"])

        hlr_utils.write_file(conf.output, "text/Dave2d", dp_som6_1,
                             output_ext="lvt",
                             extra_tag=dataset_type,
                             verbose=conf.verbose,
                             data_ext=conf.ext_replacement,
                             path_replacement=conf.path_replacement,
                             message="wavelength vs polar angle information") 

        del dp_som6_1

    # Step 8: Rebin transmission monitor axis onto detector pixel axis
    if trans_data is not None:
        print "Reading in transmission monitor data from file"

        dtm_som3 = dr_lib.add_files([trans_data],
                                    dataset_type=dataset_type,
                                    dst_type="text/Spec",
                                    Verbose=conf.verbose,
                                    Timer=t)

    
    if conf.verbose and dtm_som3 is not None:
        print "Rebin transmission monitor axis to detector pixel axis"
        
    if t is not None:
        t.getTime(False)

    dtm_som4 = dr_lib.rebin_monitor(dtm_som3, dp_som6, rtype="frac")

    if t is not None and dtm_som3 is not None:
        t.getTime(msg="After rebinning transmission monitor ")

    del dtm_som3

    # Step 9: Normalize data by transmission monitor    
    if conf.verbose and dtm_som4 is not None:
        print "Normalizing data by transmission monitor"

    if t is not None:
        t.getTime(False)

    if dtm_som4 is not None:
        # The transmission spectra derived from sas_transmission do not have
        # the same y information by convention as sample data or a
        # transmission monitor. Therefore, we fake it by setting the
        # y information from the sample data into the transmission.
        if trans_data is not None:
            dtm_som4.setYLabel(dp_som6.getYLabel())
            dtm_som4.setYUnits(dp_som6.getYUnits())
        dp_som7 = common_lib.div_ncerr(dp_som6, dtm_som4)
    else:
        dp_som7 = dp_som6

    if t is not None and dtm_som4 is not None:
        t.getTime(msg="After normalizing data by transmission monitor ")

    del dp_som6

    # Step 10: Convert wavelength to Q for data
    if conf.verbose:
        print "Converting data from wavelength to scalar Q"
    
    if t is not None:
        t.getTime(False)

    dp_som8 = common_lib.wavelength_to_scalar_Q(dp_som7)

    if t is not None:
        t.getTime(msg="After converting wavelength to scalar Q ")
        
    del dp_som7

    if conf.facility == "LENS":
        # Step 11: Apply SAS correction factor to data
        if conf.verbose:
            print "Applying geometrical correction"

        if t is not None:
            t.getTime(False)

        dp_som9 = dr_lib.apply_sas_correct(dp_som8)

        if t is not None:
            t.getTime(msg="After applying geometrical correction ")

        return dp_som9
    else:
        return dp_som8
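
The drivers in the surrounding examples call process_sas_data in two modes; a
minimal sketch of both follows, with placeholder run files and assuming conf
is a populated hlr_utils.Configure object. Only keywords documented above are
used.

# Hypothetical calls; run numbers are placeholders.
# Transmission mode: returns right after beam monitor normalization (Step 7).
trans_som = process_sas_data(["SANS_0001.nxs"], conf,
                             dataset_type="transmission",
                             transmission=True)
# Full reduction to scalar Q, with wavelength-dependent background
# subtraction.
sq_som = process_sas_data(["SANS_0002.nxs"], conf,
                          dataset_type="data",
                          bkg_subtract=conf.bkg_coeff,
                          bkg_scale=conf.bkg_scale,
                          acc_down_time=conf.data_acc_down_time.toValErrTuple())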
Example #4
def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                           timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import common_lib
    import dr_lib
    import DST
    import hlr_utils

    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver "\
                           +"script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"

        inst_geom_dst = DST.getInstance("application/x-NxsGeom",
                                        config.inst_geom)
    else:
        inst_geom_dst = None

    # Add so_axis to Configure object
    config.so_axis = "time_of_flight"

    dataset_type = "background"

    # Step 0: Open appropriate data files

    # Data
    if config.verbose:
        print "Reading %s file" % dataset_type

    # The [0] is to get the data SOM and ignore the None background SOM
    dp_som = dr_lib.add_files(config.data,
                              Data_Paths=config.data_paths.toPath(),
                              SO_Axis=config.so_axis,
                              Signal_ROI=config.roi_file,
                              dataset_type=dataset_type,
                              Verbose=config.verbose,
                              Timer=tim)

    if tim is not None:
        tim.getTime(msg="After reading %s " % dataset_type)

    dp_som0 = dr_lib.fix_bin_contents(dp_som)

    del dp_som

    if inst_geom_dst is not None:
        inst_geom_dst.setGeometry(config.data_paths.toPath(), dp_som0)

    # Note: time_zero_offset_det MUST be a tuple
    if config.time_zero_offset_det is not None:
        dp_som0.attr_list["Time_zero_offset_det"] = \
                                   config.time_zero_offset_det.toValErrTuple()

    # Step 2: Convert TOF to wavelength for data
    if config.verbose:
        print "Converting TOF to wavelength"

    if tim is not None:
        tim.getTime(False)

    # Convert detector pixels
    dp_som1 = common_lib.tof_to_wavelength_lin_time_zero(
        dp_som0,
        units="microsecond",
        time_zero_offset=config.time_zero_offset_det.toValErrTuple(),
        inst_param="total")

    if tim is not None:
        tim.getTime(msg="After converting TOF to wavelength ")

    del dp_som0

    if config.verbose:
        print "Cutting spectra"

    if tim is not None:
        tim.getTime(False)

    dp_som2 = dr_lib.cut_spectra(dp_som1, config.lambda_low_cut,
                                 config.lambda_high_cut)

    if tim is not None:
        tim.getTime(msg="After cutting spectra ")

    del dp_som1

    rebin_axis = config.lambda_bins.toNessiList()

    # Put the data on the same axis
    if config.verbose:
        print "Rebinning data onto specified wavelength axis"

    if tim is not None:
        tim.getTime(False)

    dp_som3 = dr_lib.sum_by_rebin_frac(dp_som2, rebin_axis)

    if tim is not None:
        tim.getTime(msg="After rebinning data onto specified wavelength axis ")

    del dp_som2

    data_run_time = dp_som3.attr_list["background-duration"]

    # Calculate the accelerator on time
    if config.verbose:
        print "Calculating accelerator on time"

    acc_on_time = hlr_utils.DrParameter(
        data_run_time.getValue() - config.acc_down_time.getValue(), 0.0,
        "seconds")

    # Get the number of data bins
    num_wave_bins = len(rebin_axis) - 1

    # Calculate the scaled accelerator uptime
    if config.verbose:
        print "Calculating the scaled accelerator uptime"

    if tim is not None:
        tim.getTime(False)

    final_scale = acc_on_time.toValErrTuple()[0] / num_wave_bins
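    # Illustrative numbers only: a 3600 s run with 600 s of accelerator
    # downtime and 100 wavelength bins gives (3600 - 600) / 100 = 30 s of
    # beam-on time per bin.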

    if tim is not None:
        tim.getTime(msg="After calculating the scaled accelerator uptime ")

    # Create the final background spectrum
    if config.verbose:
        print "Creating the background spectrum"

    if tim is not None:
        tim.getTime(False)

    dp_som4 = common_lib.div_ncerr(dp_som3, (final_scale, 0))
    dp_som4.attr_list["%s-Scaling" % dataset_type] = final_scale

    if tim is not None:
        tim.getTime(msg="After creating background spectrum ")

    del dp_som3

    # Write out the background spectrum
    hlr_utils.write_file(config.output,
                         "text/Spec",
                         dp_som4,
                         verbose=config.verbose,
                         output_ext="bkg",
                         data_ext=config.ext_replacement,
                         replace_path=False,
                         replace_ext=True,
                         message="background spectrum")

    dp_som4.attr_list["config"] = config

    hlr_utils.write_file(config.output,
                         "text/rmd",
                         dp_som4,
                         output_ext="rmd",
                         data_ext=config.ext_replacement,
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="metadata")

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")
Example #5
def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                           timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import dr_lib
    import DST
    import hlr_utils
    
    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver "\
                           +"script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"
            
        inst_geom_dst = DST.getInstance("application/x-NxsGeom",
                                        config.inst_geom)
    else:
        inst_geom_dst = None

    # Perform Steps 1-11 on sample data
    d_som1 = dr_lib.process_sas_data(config.data, config, timer=tim,
                                     inst_geom_dst=inst_geom_dst,
                                     bkg_subtract=config.bkg_coeff,
                     acc_down_time=config.data_acc_down_time.toValErrTuple(),
                                     bkg_scale=config.bkg_scale,
                                     trans_data=config.data_trans)

    # Perform Steps 1-11 on buffer/solvent only data
    if config.solv is not None:
        s_som1 = dr_lib.process_sas_data(config.solv, config, timer=tim,
                                         inst_geom_dst=inst_geom_dst,
                                         dataset_type="solvent",
                                         bkg_subtract=config.bkg_coeff,
                     acc_down_time=config.solv_acc_down_time.toValErrTuple(),
                                         bkg_scale=config.bkg_scale,
                                         trans_data=config.solv_trans)
    else:
        s_som1 = None

    # Step 12: Subtract buffer/solvent only spectrum from sample spectrum
    d_som2 = dr_lib.subtract_bkg_from_data(d_som1, s_som1,
                                           verbose=config.verbose,
                                           timer=tim,
                                           dataset1="data",
                                           dataset2="solvent")
    
    del s_som1, d_som1

    # Perform Steps 1-11 on empty-can data
    if config.ecan is not None:
        e_som1 = dr_lib.process_sas_data(config.ecan, config, timer=tim,
                                         inst_geom_dst=inst_geom_dst,
                                         dataset_type="empty_can",
                                         bkg_subtract=config.bkg_coeff,
                     acc_down_time=config.ecan_acc_down_time.toValErrTuple(),
                                         bkg_scale=config.bkg_scale,
                                         trans_data=config.ecan_trans)
    else:
        e_som1 = None

    # Step 13: Subtract empty-can spectrum from sample spectrum
    d_som3 = dr_lib.subtract_bkg_from_data(d_som2, e_som1,
                                           verbose=config.verbose,
                                           timer=tim,
                                           dataset1="data",
                                           dataset2="empty_can")
    
    del e_som1, d_som2

    # Perform Steps 1-11 on open beam data
    if config.open is not None:
        o_som1 = dr_lib.process_sas_data(config.open, config, timer=tim,
                                         inst_geom_dst=inst_geom_dst,
                                         dataset_type="open_beam",
                                         bkg_subtract=config.bkg_coeff,
                     acc_down_time=config.open_acc_down_time.toValErrTuple(),
                                         bkg_scale=config.bkg_scale)
    else:
        o_som1 = None
        
    # Step 14: Subtract open beam spectrum from sample spectrum
    d_som4 = dr_lib.subtract_bkg_from_data(d_som3, o_som1,
                                           verbose=config.verbose,
                                           timer=tim,
                                           dataset1="data",
                                           dataset2="open_beam")
    
    del o_som1, d_som3

    # Perform Steps 1-11 on dark current data
    if config.dkcur is not None:
        dc_som1 = dr_lib.process_sas_data(config.dkcur, config, timer=tim,
                                          inst_geom_dst=inst_geom_dst,
                                          dataset_type="dark_current",
                                          bkg_subtract=config.bkg_coeff)
    else:
        dc_som1 = None
        
    # Step 15: Subtract dark current spectrum from sample spectrum
    d_som5 = dr_lib.subtract_bkg_from_data(d_som4, dc_som1,
                                           verbose=config.verbose,
                                           timer=tim,
                                           dataset1="data",
                                           dataset2="dark_current")
    
    del dc_som1, d_som4    

    # Create 2D distributions if necessary
    if config.dump_Q_r:
        d_som5_1 = dr_lib.create_param_vs_Y(d_som5, "radius", "param_array",
                                       config.r_bins.toNessiList(),
                                       rebin_axis=config.Q_bins.toNessiList(),
                                       binnorm=True,
                                       y_label="S",
                                       y_units="Counts / A^-1 m",
                                       x_labels=["Radius", "Q"],
                                       x_units=["m", "1/Angstroms"])

        hlr_utils.write_file(config.output, "text/Dave2d", d_som5_1,
                             output_ext="qvr", verbose=config.verbose,
                             data_ext=config.ext_replacement,
                             path_replacement=config.path_replacement,
                             message="S(r, Q) information")

        del d_som5_1
        
    if config.dump_Q_theta:
        d_som5_1 = dr_lib.create_param_vs_Y(d_som5, "polar", "param_array",
                                       config.theta_bins.toNessiList(),
                                       rebin_axis=config.Q_bins.toNessiList(),
                                       binnorm=True,
                                       y_label="S",
                                       y_units="Counts / A^-1 rads",
                                       x_labels=["Polar Angle", "Q"],
                                       x_units=["rads", "1/Angstroms"])

        hlr_utils.write_file(config.output, "text/Dave2d", d_som5_1,
                             output_ext="qvt", verbose=config.verbose,
                             data_ext=config.ext_replacement,
                             path_replacement=config.path_replacement,
                             message="S(theta, Q) information")

        del d_som5_1
        
    # Steps 16 and 17: Rebin and sum all spectra
    if config.verbose:
        print "Rebinning and summing for final spectrum"
            
    if tim is not None:
        tim.getTime(False)

    if config.dump_frac_rebin:
        set_conf = config
    else:
        set_conf = None

    d_som6 = dr_lib.sum_by_rebin_frac(d_som5, config.Q_bins.toNessiList(),
                                      configure=set_conf)

    if tim is not None:
        tim.getTime(msg="After rebinning and summing for spectrum")    

    del d_som5

    if config.facility == "LENS":
        # Step 18: Scale final spectrum by Q bin centers
        if config.verbose:
            print "Scaling final spectrum by Q centers"
        
        if tim is not None:
            tim.getTime(False)

        d_som7 = dr_lib.fix_bin_contents(d_som6, scale=True, width=True,
                                         units="1/Angstroms")

        if tim is not None:
            tim.getTime(msg="After scaling final spectrum")    
    else:
        d_som7 = d_som6

    del d_som6

    # If rescaling factor present, rescale the data
    if config.rescale_final is not None:
        import common_lib
        d_som8 = common_lib.mult_ncerr(d_som7, (config.rescale_final, 0.0))
    else:
        d_som8 = d_som7

    del d_som7
    
    hlr_utils.write_file(config.output, "text/Spec", d_som8,
                         verbose=config.verbose,
                         replace_path=False,
                         replace_ext=False,
                         message="combined S(Q) information")

    # Create 1D canSAS file
    hlr_utils.write_file(config.output, "text/canSAS", d_som8,
                         verbose=config.verbose,
                         output_ext="xml",
                         data_ext=config.ext_replacement,         
                         path_replacement=config.path_replacement,
                         message="combined S(Q) information")
    
    d_som8.attr_list["config"] = config

    hlr_utils.write_file(config.output, "text/rmd", d_som8,
                         output_ext="rmd",
                         data_ext=config.ext_replacement,         
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="metadata")

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")
Example #6
def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                           timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import common_lib
    import dr_lib
    import DST
    import hlr_utils

    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver "\
                           +"script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"

        inst_geom_dst = DST.getInstance("application/x-NxsGeom",
                                        config.inst_geom)
    else:
        inst_geom_dst = None

    only_background = False
    data_type = "transmission"

    # Perform Steps 1,6-7 or 1,3,5-7 on sample data
    d_som1 = dr_lib.process_sas_data(config.data,
                                     config,
                                     timer=tim,
                                     inst_geom_dst=inst_geom_dst,
                                     dataset_type=data_type,
                                     transmission=True,
                                     get_background=only_background)

    # Perform Steps 1,6-7 on background data
    if config.back is not None:
        b_som1 = dr_lib.process_sas_data(config.back,
                                         config,
                                         timer=tim,
                                         inst_geom_dst=inst_geom_dst,
                                         dataset_type="trans_bkg",
                                         transmission=True)
    else:
        b_som1 = None

    # Put the datasets on the same axis
    d_som2 = dr_lib.sum_by_rebin_frac(d_som1, config.lambda_bins.toNessiList())
    del d_som1

    if b_som1 is not None:
        b_som2 = dr_lib.sum_by_rebin_frac(b_som1,
                                          config.lambda_bins.toNessiList())
    else:
        b_som2 = None

    del b_som1

    # Divide the data spectrum by the background spectrum
    if b_som2 is not None:
        d_som3 = common_lib.div_ncerr(d_som2, b_som2)
    else:
        d_som3 = d_som2

    del d_som2, b_som2

    # Reset y units to dimensionless for the transmission due to the ratio
    if config.back is not None:
        d_som3.setYLabel("Ratio")
        d_som3.setYUnits("")
        write_message = "transmission spectrum"
    else:
        write_message = "spectrum for background estimation"

    # Write out the transmission spectrum
    hlr_utils.write_file(config.output,
                         "text/Spec",
                         d_som3,
                         verbose=config.verbose,
                         replace_path=False,
                         replace_ext=False,
                         message=write_message)

    d_som3.attr_list["config"] = config

    hlr_utils.write_file(config.output,
                         "text/rmd",
                         d_som3,
                         output_ext="rmd",
                         data_ext=config.ext_replacement,
                         path_replacement=config.path_replacement,
                         verbose=config.verbose,
                         message="metadata")

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")
Example #7
def process_reflp_data(datalist, conf, roi_file, bkg_roi_file=None,
                       no_bkg=False, **kwargs):
    """
    This function combines Steps 1 through 3 in section 2.4.6.1 of the data
    reduction process for Reduction from TOF to lambda_T as specified by
    the document at
    U{http://neutrons.ornl.gov/asg/projects/SCL/reqspec/DR_Lib_RS.doc}. The
    function takes a list of file names, a L{hlr_utils.Configure} object, a
    region-of-interest (ROI) file for the normalization dataset, a background
    region-of-interest (ROI) file and an optional flag about background
    subtraction, and processes the data accordingly.

    @param datalist: The filenames of the data to be processed
    @type datalist: C{list} of C{string}s

    @param conf: Object that contains the current setup of the driver
    @type conf: L{hlr_utils.Configure}

    @param roi_file: The file containing the list of pixel IDs for the region
                     of interest. This only applies to normalization data. 
    @type roi_file: C{string}

    @param bkg_roi_file: The file containing the list of pixel IDs for the
                         (possible) background region of interest.
    @type bkg_roi_file: C{string}    
    
    @param no_bkg: (OPTIONAL) Flag which determines if the background will be
                              calculated and subtracted.
    @type no_bkg: C{boolean}    

    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword inst_geom_dst: File object that contains instrument geometry
                            information.
    @type inst_geom_dst: C{DST.GeomDST}

    @keyword timer:  Timing object so the function can perform timing
                     estimates.
    @type timer: C{sns_timer.DiffTime}


    @return: Object that has undergone all requested processing steps
    @rtype: C{SOM.SOM}
    """
    import hlr_utils
    import common_lib
    import dr_lib

    # Check keywords
    try:
        i_geom_dst = kwargs["inst_geom_dst"]
    except KeyError:
        i_geom_dst = None
    
    try:
        t = kwargs["timer"]
    except KeyError:
        t = None

    if roi_file is not None:
        # Normalization
        dataset_type = "norm"
    else:
        # Sample data
        dataset_type = "data"

    so_axis = "time_of_flight"

    # Step 0: Open data files and select ROI (if necessary)
    if conf.verbose:
        print "Reading %s file" % dataset_type

    if len(conf.norm_data_paths) and dataset_type == "norm":
        data_path = conf.norm_data_paths.toPath()
    else:
        data_path = conf.data_paths.toPath()

    (d_som1, b_som1) = dr_lib.add_files_bg(datalist,
                                           Data_Paths=data_path,
                                           SO_Axis=so_axis,
                                           dataset_type=dataset_type,
                                           Signal_ROI=roi_file,
                                           Bkg_ROI=bkg_roi_file,
                                           Verbose=conf.verbose,
                                           Timer=t)

    if t is not None:
        t.getTime(msg="After reading %s " % dataset_type)

    # Override geometry if necessary
    if i_geom_dst is not None:
        i_geom_dst.setGeometry(conf.data_paths.toPath(), d_som1)

    if dataset_type == "data":
        # Get TOF bin width
        conf.delta_TOF = d_som1[0].axis[0].val[1] - d_som1[0].axis[0].val[0]

    if conf.mon_norm:
        if conf.verbose:
            print "Reading in monitor data from %s file" % dataset_type

        # add_files returns the monitor data SOM directly (no background SOM)
        dm_som1 = dr_lib.add_files(datalist, Data_Paths=conf.mon_path.toPath(),
                                   SO_Axis=so_axis,
                                   dataset_type=dataset_type,
                                   Verbose=conf.verbose,
                                   Timer=t)
        
        if t is not None:
            t.getTime(msg="After reading monitor data ")
            
    else:
        dm_som1 = None

    # Step 1: Sum all spectra along the low resolution direction
    # Set sorting for REF_L
    if conf.verbose:
        print "Summing over low resolution direction"

    # Set sorting
    (y_sort,
     cent_pixel) = hlr_utils.get_ref_integration_direction(
        conf.int_dir, conf.inst, d_som1.attr_list.instrument)
    
    if t is not None:
        t.getTime(False)

    d_som2 = dr_lib.sum_all_spectra(d_som1, y_sort=y_sort, stripe=True,
                                    pixel_fix=cent_pixel)

    if b_som1 is not None:
        b_som2 = dr_lib.sum_all_spectra(b_som1, y_sort=y_sort, stripe=True,
                                        pixel_fix=cent_pixel)
        del b_som1
    else:
        b_som2 = b_som1

    if t is not None:
        t.getTime(msg="After summing low resolution direction ")
        
    del d_som1

    # Determine background spectrum
    if conf.verbose and not no_bkg:
        print "Determining %s background" % dataset_type

    if b_som2 is not None:
        B = dr_lib.calculate_ref_background(b_som2, no_bkg, conf.inst, None,
                                            aobj=d_som2)
    if t is not None:
        t.getTime(msg="After background determination")

    # Subtract background spectrum from data spectra
    if not no_bkg:
        d_som3 = dr_lib.subtract_bkg_from_data(d_som2, B,
                                               verbose=conf.verbose,
                                               timer=t,
                                               dataset1="data",
                                               dataset2="background")
    else:
        d_som3 = d_som2

    del d_som2

    # Zero the spectra if necessary
    if roi_file is None and (conf.tof_cut_min is not None or \
                             conf.tof_cut_max is not None):
        import utils
        # Find the indices for the non-zero range
        if conf.tof_cut_min is None:
            conf.TOF_min = d_som3[0].axis[0].val[0]
            start_index = 0
        else:
            start_index = utils.bisect_helper(d_som3[0].axis[0].val,
                                              conf.tof_cut_min)

        if conf.tof_cut_max is None:
            conf.TOF_max = d_som3[0].axis[0].val[-1]
            end_index = len(d_som3[0].axis[0].val) - 1
        else:
            end_index = utils.bisect_helper(d_som3[0].axis[0].val,
                                            conf.tof_cut_max)

        nz_list = []
        for i in xrange(hlr_utils.get_length(d_som3)):
            nz_list.append((start_index, end_index))
        
        d_som4 = dr_lib.zero_spectra(d_som3, nz_list, use_bin_index=True)
    else:
        conf.TOF_min = d_som3[0].axis[0].val[0]
        conf.TOF_max = d_som3[0].axis[0].val[-1]
        d_som4 = d_som3

    del d_som3

    # Step N: Convert TOF to wavelength
    if conf.verbose:
        print "Converting TOF to wavelength"

    if t is not None:
        t.getTime(False)

    d_som5 = common_lib.tof_to_wavelength(d_som4, inst_param="total",
                                          units="microsecond")
    if dm_som1 is not None:
        dm_som2 = common_lib.tof_to_wavelength(dm_som1, units="microsecond")
    else:
        dm_som2 = None

    del dm_som1

    if t is not None:
        t.getTime(msg="After converting TOF to wavelength ")

    del d_som4

    if conf.mon_norm:
        dm_som3 = dr_lib.rebin_monitor(dm_som2, d_som5, rtype="frac")
    else:
        dm_som3 = None

    del dm_som2

    if not conf.mon_norm:
        # Step 2: Multiply the spectra by the proton charge
        if conf.verbose:
            print "Multiply spectra by proton charge"

        pc_tag = dataset_type + "-proton_charge"
        proton_charge = d_som5.attr_list[pc_tag]

        if t is not None:
            t.getTime(False)

        d_som6 = common_lib.div_ncerr(d_som5, (proton_charge.getValue(), 0.0))

        if t is not None:
            t.getTime(msg="After scaling by proton charge ")
    else:
        if conf.verbose:
            print "Normalize by monitor spectrum"

        if t is not None:
            t.getTime(False)

        d_som6 = common_lib.div_ncerr(d_som5, dm_som3)

        if t is not None:
            t.getTime(msg="After monitor normalization ")

    del d_som5, dm_som3

    if roi_file is None:
        return d_som6
    else:
        # Step 3: Make one spectrum for normalization dataset
        # Need to create a final rebinning axis
        pathlength = d_som6.attr_list.instrument.get_total_path(
            det_secondary=True)
        
        delta_lambda = common_lib.tof_to_wavelength((conf.delta_TOF, 0.0),
                                                    pathlength=pathlength)
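        # Note: delta_lambda[0] is the wavelength equivalent of the TOF bin
        # width captured earlier in conf.delta_TOF; it is used below as the
        # bin width for the final rebinning axis.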

        lambda_bins = dr_lib.create_axis_from_data(d_som6,
                                                   width=delta_lambda[0])

        return dr_lib.sum_by_rebin_frac(d_som6, lambda_bins.toNessiList())
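
A minimal usage sketch for process_reflp_data (not from the original source):
the run-file names and the ROI path are placeholders, and conf is assumed to
be an hlr_utils.Configure object already populated by the calling driver, as
the docstring describes.

# Hypothetical invocation -- all file names below are placeholders.
norm_som = process_reflp_data(["REF_L_1234_event.nxs"],  # placeholder run file
                              conf,
                              roi_file="REF_L_norm.roi",  # ROI file -> "norm" branch
                              no_bkg=True)                # skip background estimation

data_som = process_reflp_data(["REF_L_5678_event.nxs"],  # placeholder run file
                              conf,
                              roi_file=None)              # None -> sample "data" branch
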
Example #8
0
def run(config, tim=None):
    """
    This method is where the data reduction process gets done.

    @param config: Object containing the data reduction configuration
                   information.
    @type config: L{hlr_utils.Configure}

    @param tim: (OPTIONAL) Object that will allow the method to perform
                           timing evaluations.
    @type tim: C{sns_timer.DiffTime}
    """
    import common_lib
    import dr_lib
    import DST
    import hlr_utils

    if tim is not None:
        tim.getTime(False)
        old_time = tim.getOldTime()

    if config.data is None:
        raise RuntimeError("Need to pass a data filename to the driver script.")

    # Read in geometry if one is provided
    if config.inst_geom is not None:
        if config.verbose:
            print "Reading in instrument geometry file"

        inst_geom_dst = DST.getInstance("application/x-NxsGeom", config.inst_geom)
    else:
        inst_geom_dst = None

    # Add so_axis to Configure object
    config.so_axis = "time_of_flight"

    dataset_type = "background"

    # Step 0: Open appropriate data files

    # Data
    if config.verbose:
        print "Reading %s file" % dataset_type

    # add_files returns the data SOM directly (no background SOM here)
    dp_som = dr_lib.add_files(
        config.data,
        Data_Paths=config.data_paths.toPath(),
        SO_Axis=config.so_axis,
        Signal_ROI=config.roi_file,
        dataset_type=dataset_type,
        Verbose=config.verbose,
        Timer=tim,
    )

    if tim is not None:
        tim.getTime(msg="After reading %s " % dataset_type)

    dp_som0 = dr_lib.fix_bin_contents(dp_som)

    del dp_som

    if inst_geom_dst is not None:
        inst_geom_dst.setGeometry(config.data_paths.toPath(), dp_som0)

    # Note: time_zero_offset_det MUST be a tuple
    if config.time_zero_offset_det is not None:
        dp_som0.attr_list["Time_zero_offset_det"] = config.time_zero_offset_det.toValErrTuple()

    # Step 2: Convert TOF to wavelength for data
    if config.verbose:
        print "Converting TOF to wavelength"

    if tim is not None:
        tim.getTime(False)

    # Convert detector pixels
    dp_som1 = common_lib.tof_to_wavelength_lin_time_zero(
        dp_som0, units="microsecond", time_zero_offset=config.time_zero_offset_det.toValErrTuple(), inst_param="total"
    )

    if tim is not None:
        tim.getTime(msg="After converting TOF to wavelength ")

    del dp_som0

    if config.verbose:
        print "Cutting spectra"

    if tim is not None:
        tim.getTime(False)

    dp_som2 = dr_lib.cut_spectra(dp_som1, config.lambda_low_cut, config.lambda_high_cut)

    if tim is not None:
        tim.getTime(msg="After cutting spectra ")

    del dp_som1

    rebin_axis = config.lambda_bins.toNessiList()

    # Put the data on the same axis
    if config.verbose:
        print "Rebinning data onto specified wavelength axis"

    if tim is not None:
        tim.getTime(False)

    dp_som3 = dr_lib.sum_by_rebin_frac(dp_som2, rebin_axis)

    if tim is not None:
        tim.getTime(msg="After rebinning data onto specified wavelength axis ")

    del dp_som2

    data_run_time = dp_som3.attr_list["background-duration"]

    # Calculate the accelerator on time
    if config.verbose:
        print "Calculating accelerator on time"

    acc_on_time = hlr_utils.DrParameter(data_run_time.getValue() - config.acc_down_time.getValue(), 0.0, "seconds")

    # Get the number of data bins
    num_wave_bins = len(rebin_axis) - 1

    # Calculate the scaled accelerator uptime
    if config.verbose:
        print "Calculating the scaled accelerator uptime"

    if tim is not None:
        tim.getTime(False)

    final_scale = acc_on_time.toValErrTuple()[0] / num_wave_bins
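    # Illustrative arithmetic only (placeholder numbers): a 3600 s background
    # run with 600 s of accelerator down time and 200 wavelength bins gives
    # final_scale = (3600 - 600) / 200 = 15 s per bin, so the division below
    # yields counts per accelerator-on second per wavelength bin.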

    if tim is not None:
        tim.getTime(msg="After calculating the scaled accelerator uptime ")

    # Create the final background spectrum
    if config.verbose:
        print "Creating the background spectrum"

    if tim is not None:
        tim.getTime(False)

    dp_som4 = common_lib.div_ncerr(dp_som3, (final_scale, 0))
    dp_som4.attr_list["%s-Scaling" % dataset_type] = final_scale

    if tim is not None:
        tim.getTime(msg="After creating background spectrum ")

    del dp_som3

    # Write out the background spectrum
    hlr_utils.write_file(
        config.output,
        "text/Spec",
        dp_som4,
        verbose=config.verbose,
        output_ext="bkg",
        data_ext=config.ext_replacement,
        replace_path=False,
        replace_ext=True,
        message="background spectrum",
    )

    dp_som4.attr_list["config"] = config

    hlr_utils.write_file(
        config.output,
        "text/rmd",
        dp_som4,
        output_ext="rmd",
        data_ext=config.ext_replacement,
        path_replacement=config.path_replacement,
        verbose=config.verbose,
        message="metadata",
    )

    if tim is not None:
        tim.setOldTime(old_time)
        tim.getTime(msg="Total Running Time")