Example #1
def do_background_test(background_int, median_lo, median_hi, sigma, mask_zero,
                       start_index=None, end_index=None):
    """
    Run the background tests

    Required inputs:
      background_int - An integrated workspace
      median_lo - Fraction of median to consider counting low
      median_hi - Fraction of median to consider counting high
      sigma     - Error criterion as a multiple of error bar
      mask_zero - If True, zero background counts will be considered a fail

    Optional inputs:
      start_index - Workspace index at which to start the test
      end_index   - Workspace index at which to end the test
    """
    logger.notice('Running background count test')

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_background_test'

    mask_bkgd, num_failures = MedianDetectorTest(InputWorkspace=background_int,
                                                 StartWorkspaceIndex=start_index, EndWorkspaceIndex=end_index,
                                                 SignificanceTest=sigma,
                                                 LowThreshold=median_lo, HighThreshold=median_hi,
                                                 LowOutlier=0.0, HighOutlier=1e100, ExcludeZeroesFromMedian=True)
    # TODO: this looks like a hack; why does the algorithm return a negative value?
    return mask_bkgd, abs(num_failures)
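
A minimal usage sketch, assuming a Mantid environment where mantid.simpleapi is importable; the run file name and the threshold values are illustrative only:

from mantid.simpleapi import LoadRaw, Integration

raw_ws = LoadRaw(Filename='MAR11001.raw')  # hypothetical run file
# integrate the counts over an assumed background time-of-flight range
bkgd_int = Integration(raw_ws, RangeLower=15000, RangeUpper=19000)
mask, n_failed = do_background_test(bkgd_int, median_lo=0.1, median_hi=1.5,
                                    sigma=3.3, mask_zero=True)
print('{0} spectra failed the background test'.format(n_failed))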
Example #2
def do_bleed_test(sample_run, max_framerate, ignored_pixels):
    """Runs the CreatePSDBleedMask algorithm

    Input:
    sample_run  -  The run number of the sample
    max_framerate - The maximum allowed framerate in a tube. If None, the instrument defaults are used.
    ignored_pixels - The number of central pixels to ignore. If None, the instrument defaults are used.
    """
    # NOTE: Should be deployed on non-normalized workspace only!
    logger.notice("Running PSD bleed test")
    # Load the sample run
    if __Reducer__:  # try to use the generic loader, which works with files and workspaces alike
        sample_run = __Reducer__.get_run_descriptor(sample_run)
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = data_ws.name() + "_bleed"
    else:
        # the sample run may already be a run descriptor even though __Reducer__ has not been exposed
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = data_ws.name() + "_bleed"

    if max_framerate is None:  # get defaults
        max_framerate = float(data_ws.getInstrument().getNumberParameter("max-tube-framerate")[0])
    if ignored_pixels is None:  # get defaults
        ignored_pixels = int(data_ws.getInstrument().getNumberParameter("num-ignored-pixels")[0])
    else:
        # Make sure it is an int
        ignored_pixels = int(ignored_pixels)

    # What shall we call the output
    lhs_names = lhs_info("names")
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = "__do_bleed__test"
    # Check that all necessary logs are present in the workspace, as nxs workspace
    # log names differ from raw-file workspace log names.
    try:
        nFrames = data_ws.getRun().getLogData("goodfrm").value
    except RuntimeError:
        try:
            nFrames = data_ws.getRun().getLogData("good_frames").lastValue()
            AddSampleLog(Workspace=data_ws, LogName="goodfrm", LogText=str(int(nFrames)), LogType="Number")
        except RuntimeError:
            raise RuntimeError(
                "Bleed test fails as no appropriate 'good_frames' or 'goodfrm' log is loaded with ws: {0}\n"
                "Disable bleed test by setting diag_bleed_test=False or add 'goodfrm' log to the workspace\n".format(
                    data_ws.name()
                )
            )

    bleed_test, num_failed = CreatePSDBleedMask(
        InputWorkspace=data_ws,
        OutputWorkspace=ws_name,
        MaxTubeFramerate=max_framerate,
        NIgnoredCentralPixels=ignored_pixels,
    )
    return bleed_test, num_failed
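
A usage sketch, assuming a configured __Reducer__ and a loadable run; the run number and the limits are illustrative, and passing None picks up the instrument defaults from the IDF:

# hypothetical run number; explicit limits override the IDF defaults
bleed_mask, n_failed = do_bleed_test(11001, max_framerate=0.01, ignored_pixels=80)
print('{0} detectors failed the bleed test'.format(n_failed))

# None for both limits falls back to max-tube-framerate / num-ignored-pixels from the IDF
bleed_mask, n_failed = do_bleed_test(11001, None, None)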
Example #3
def do_second_white_test(white_counts, comp_white_counts, tiny, large, out_lo, out_hi,
                         median_lo, median_hi, sigma, variation,
                         start_index=None, end_index=None):
    """
    Run additional tests comparing given another white beam count workspace, comparing
    to the first

    Required inputs:

      white_counts  - A workspace containing the integrated counts from a
                      white beam vanadium run
      comp_white_counts  - A workspace containing the integrated counts from a
                      white beam vanadium run
      tiny          - Minimum threshold for acceptance
      large         - Maximum threshold for acceptance
      median_lo     - Fraction of median to consider counting low
      median_hi     - Fraction of median to consider counting high
      signif          - Counts within this number of multiples of the
                      standard dev will be kept
      variation     - Defines a range within which the ratio of the two counts is
                      allowed to fall in terms of the number of medians
    """
    logger.notice('Running second white beam test')

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_second_white_test'

    # Make sure we are a MatrixWorkspace
    white_counts = ConvertToMatrixWorkspace(InputWorkspace=white_counts,OutputWorkspace=white_counts)
    comp_white_counts = ConvertToMatrixWorkspace(InputWorkspace=comp_white_counts,OutputWorkspace=comp_white_counts)

    # Do the white beam test
    __second_white_tests, failed = do_white_test(comp_white_counts, tiny, large, median_lo, median_hi,
                                                 sigma, start_index, end_index)
    # and now compare it with the first
    effic_var, num_failed = DetectorEfficiencyVariation(WhiteBeamBase=white_counts, WhiteBeamCompare=comp_white_counts,
                                                        OutputWorkspace=ws_name,
                                                        Variation=variation, StartWorkspaceIndex=start_index,
                                                        EndWorkspaceIndex=end_index)

    DeleteWorkspace(Workspace=str(__second_white_tests))
    # Mask those that failed
    maskWS = effic_var
    MaskDetectors(Workspace=white_counts, MaskedWorkspace=maskWS)
    MaskDetectors(Workspace=comp_white_counts, MaskedWorkspace=maskWS)

    return maskWS, num_failed
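
A calling sketch, assuming white_int and white_int2 are integrated white beam vanadium workspaces produced beforehand; the threshold values mirror the defaults quoted in the arb_units docstring below:

mask, n_failed = do_second_white_test(white_int, white_int2,
                                      tiny=1e-10, large=1e10,
                                      out_lo=0.01, out_hi=100.,
                                      median_lo=0.1, median_hi=1.5,
                                      sigma=3.3, variation=1.1)
print('{0} detectors failed the efficiency variation test'.format(n_failed))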
Example #4
    def correct_absorption(self,ws,*args,**kwargs):
        """ The generic method, which switches between fast and Monte-Carlo absorption corrections
        depending on the type and properties variable provided as second input

        kwargs is a dictionary, with at least one key, describing type of
               corrections (fast or Monte-Carlo) and other keys (if any)
               containing additional properties of the correspondent correction
               algorithm.

        Returns:
        1) absorption-corrected workspace
        2) if two output arguments are provided, second would be workspace with absorption corrections coefficients
        """
        n_outputs,var_names = funcinspect.lhs_info('both')

        if len(args) == 0:
            corr_properties = kwargs  # kwargs collected via ** is always a dict (possibly empty)
        else:
            corr_properties = args[0]
            if corr_properties is None:
                corr_properties = {}
            else:
                if not isinstance(corr_properties,dict):
                    raise TypeError(
                        '*** Second non-keyword argument of the correct_absorption routine'
                        ' (if present) should be a dictionary containing '
                        ' additional parameters for selected AbsorptionCorrections algorithm')

        correction_base_ws = ConvertUnits(ws,'Wavelength',EMode='Direct')
        Mater_properties = self._Material
        SetSampleMaterial(correction_base_ws,**Mater_properties)

        if self._shape_has_axis:
            self._check_MARI_axis_(ws)

        if self._CanSetSample:
            shape_description = self._ShapeDescription
            SetSample(correction_base_ws,Geometry=shape_description)

        mc_corrections = corr_properties.pop('is_mc', False)
        fast_corrections = corr_properties.pop('is_fast',False)
        if not (mc_corrections or fast_corrections) or (mc_corrections and fast_corrections):
            fast_corrections = False # Case when both keys are true or both false reverts to the default

        if fast_corrections:
            #raise RuntimeError('Analytical absorption corrections are not currently implemented in Direct mode')
            abs_corrections = self._fast_abs_corrections(correction_base_ws,corr_properties)
        else:
            abs_corrections = self._mc_abs_corrections(correction_base_ws,corr_properties)

        abs_corrections = ConvertUnits(abs_corrections,'DeltaE',EMode='Direct')
        ws = ws / abs_corrections

        DeleteWorkspace(correction_base_ws)
        if ws.name() != var_names[0]:
            RenameWorkspace(ws,var_names[0])
        if n_outputs == 1:
            #DeleteWorkspace(abs_corrections)
            return ws
        elif n_outputs == 2:
            if abs_corrections.name() != var_names[1]:
                RenameWorkspace(abs_corrections,var_names[1])
            return (ws,abs_corrections)
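
A sketch of the two calling conventions the method supports, assuming red is an object exposing correct_absorption and ws is a DeltaE workspace; the Monte-Carlo property name is an assumption about the underlying algorithm:

# one output: only the corrected workspace is kept
ws = red.correct_absorption(ws, {'is_mc': True, 'NumberOfWavelengthPoints': 50})

# two outputs: the correction coefficients workspace is kept as well
ws, abs_corr = red.correct_absorption(ws, is_fast=True)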
Example #5
    def iliad_wrapper(*args):
        #seq = inspect.stack()
        # output workspace name.
        try:
            name = funcinspect.lhs_info('names')
            out_ws_name = name[0]
        # no exception type(s) specified; who knows what exception this internal procedure raises...
        #pylint: disable=W0702
        except:
            out_ws_name = None

        args = list(args)  # copy to a list: a tuple would make the input-file substitution below fail silently
        host = args[0]
        if len(args) > 1:
            input_file = args[1]
            if len(args) > 2:
                output_directory = args[2]
            else:
                output_directory = None
        else:
            input_file = None
            output_directory = None
        # add input file folder to data search directory if file has it
        if input_file and isinstance(input_file, string_types):
            data_path = os.path.dirname(input_file)
            if len(data_path) > 0:
                try:
                    config.appendDataSearchDir(str(data_path))
                    args[1] = os.path.basename(input_file)
                #pylint: disable=bare-except
                except:  # if mantid is not available, this should ignore config
                    pass
        if output_directory:
            config['defaultsave.directory'] = str(output_directory)

        #pylint: disable=protected-access
        if host._run_from_web:
            #pylint: disable=protected-access
            web_vars = host._wvs.get_all_vars()
            host.reducer.prop_man.set_input_parameters(**web_vars)
        else:
            pass  # variables are assumed to have been set up already

        custom_print_function = host.set_custom_output_filename()
        if custom_print_function is not None:
            PropertyManager.save_file_name.set_custom_print(
                custom_print_function)
        #
        rez = reduce(*args)

        # prohibit returning workspace to web services.
        #pylint: disable=protected-access
        if host._run_from_web and not isinstance(rez, string_types):
            rez = ""
        else:
            if isinstance(rez, list):
                # multirep run, just return as it is
                return rez
            if rez is not None and out_ws_name and rez.name() != out_ws_name:
                # the function does not return None, pylint is wrong
                #pylint: disable=W1111
                rez = PropertyManager.sample_run.synchronize_ws(
                    rez, out_ws_name)
        return rez
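
The data-search-path manipulation above can be exercised on its own; a sketch assuming Mantid's config service is importable (the file path is illustrative):

import os
from mantid import config

input_file = '/data/cycle_14_1/MAR11001.raw'  # hypothetical path
data_path = os.path.dirname(input_file)
if data_path:
    config.appendDataSearchDir(str(data_path))  # let Mantid find runs in this folder
    input_file = os.path.basename(input_file)   # keep only the short file name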
Example #6
    def run_reduction(self):
        """" Reduces runs one by one or sum all them together and reduce after this

            if wait_for_file time is > 0, it will until  missing files appear on the
            data search path
        """
        try:
            _, r = funcinspect.lhs_info('both')
            out_ws_name = r[0]
        # no exception type(s) specified; who knows what exception this internal procedure raises...
        #pylint: disable=W0702
        except:
            out_ws_name = None

        # if this is not None, we want to run validation not reduction
        if self.validate_run_number:
            self.reducer.prop_man.log\
                ("**************************************************************************************",'warning')
            self.reducer.prop_man.log\
                ("**************************************************************************************",'warning')
            rez, mess = self.build_or_validate_result()
            if rez:
                self.reducer.prop_man.log("*** SUCCESS! {0}".format(mess))
                self.reducer.prop_man.log\
                    ("**************************************************************************************",'warning')

            else:
                self.reducer.prop_man.log(
                    "*** VALIDATION FAILED! {0}".format(mess))
                self.reducer.prop_man.log\
                    ("**************************************************************************************",'warning')
                raise RuntimeError("Validation against old data file failed")
            self.validate_run_number = None
            return rez, mess

        if self.reducer.sum_runs:
            # --------### sum runs provided ------------------------------------###
            if out_ws_name is None:
                self.sum_and_reduce()
                return None
            else:
                red_ws = self.sum_and_reduce()
                RenameWorkspace(InputWorkspace=red_ws,
                                OutputWorkspace=out_ws_name)
                return mtd[out_ws_name]
        else:
            # --------### reduce list of runs one by one ----------------------------###
            runfiles = PropertyManager.sample_run.get_run_file_list()
            if out_ws_name is None:
                for file_name in runfiles:
                    self.reduce(file_name)
                return None
            else:
                results = []
                nruns = len(runfiles)
                for num, file_name in enumerate(runfiles):
                    red_ws = self.reduce(file_name)
                    if isinstance(red_ws, list):
                        for ws in red_ws:
                            results.append(ws)
                    else:
                        if nruns == 1:
                            if red_ws.name() != out_ws_name:
                                RenameWorkspace(InputWorkspace=red_ws,
                                                OutputWorkspace=out_ws_name)
                            results.append(mtd[out_ws_name])
                        else:
                            OutWSName = '{0}#{1}of{2}'.format(
                                out_ws_name, num + 1, nruns)
                            if red_ws.name() != OutWSName:
                                RenameWorkspace(InputWorkspace=red_ws,
                                                OutputWorkspace=OutWSName)
                            results.append(mtd[OutWSName])
                #end
                if len(results) == 1:
                    return results[0]
                else:
                    return results
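
The multi-run naming convention used above is plain string formatting; a runnable illustration with an invented target name:

out_ws_name = 'MAR_reduced'
nruns = 3
names = ['{0}#{1}of{2}'.format(out_ws_name, num + 1, nruns)
         for num in range(nruns)]
print(names)  # ['MAR_reduced#1of3', 'MAR_reduced#2of3', 'MAR_reduced#3of3']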
Example #7
    def update_defaults_from_instrument(self,pInstrument,ignore_changes=False):
        """ Method used to update default parameters from the same instrument (with different parameters).

            Used if initial parameters correspond to instrument with one validity dates and
            current instrument has different validity dates and different default values for
            these dates.

            List of synonims is not modified and new properties are not added assuming that
            recent dictionary and properties are most comprehensive one

            ignore_changes==True when changes, caused by setting properties from instrument are not recorded
            ignore_changes==False -- getChangedProperties properties after applied this method would return set
                            of all properties changed when applying this method

        """
        self.reduction_instrument_warning(pInstrument)

        # Retrieve the properties, changed from interface earlier
        old_changes_list = self.getChangedProperties()

        old_changes = self.create_old_changes_dict(old_changes_list)

        param_list = prop_helpers.get_default_idf_param_list(pInstrument,self.__subst_dict)

        # remove old changes which are not related to IDF (not to reapply it again)
        old_changes = self.remove_non_IDF_changes(old_changes, param_list)

        param_list,descr_dict =  self._convert_params_to_properties(param_list,False,self.__descriptors)
        # clear record about previous changes
        self.setChangedProperties(set())

        #sort parameters to have complex properties (with underscore _) first
        sorted_param =  OrderedDict(sorted(list(param_list.items()),key=lambda x : ord((x[0][0]).lower())))

        # Walk through descriptors list and set their values
        # Assignment to descriptors should accept the form, descriptor is written in IDF
        changed_descriptors = set()
        for key,val in iteritems(descr_dict):
            if key not in old_changes_list:
                try: # this is a reliability check; the except clause should ideally never be hit. It may occur
                   # if an old IDF contains properties not present in the recent IDF.
                    cur_val = getattr(self,key)
                    setattr(self,key,val)
                    new_val = getattr(self,key)
#pylint: disable=bare-except
                except:
                    try:
                        cur_val = getattr(self,key)
#pylint: disable=bare-except
                    except:
                        cur_val = "Undefined"
                    self.log("Retrieving or reapplying script property {0} failed. Property value remains: {1}"
                             .format(key,cur_val),'warning')
                    continue

                new_val, cur_val = self.perform_simplfied_ws_comparison(new_val, cur_val)

                if new_val != cur_val:
                    changed_descriptors.add(key)
                    changed_through_main = True
                else: # property may be changed through descriptors
                    changed_through_main = False

                # dependencies are removed whether the properties are equal or not
                try:
                    dependencies = getattr(PropertyManager,key).dependencies()
#pylint: disable=bare-except
                except:
                    dependencies = []

                for dep_name in dependencies:
                    if dep_name in sorted_param:
                        if changed_through_main or (sorted_param[dep_name] == getattr(self,dep_name)):
                            del sorted_param[dep_name]
            else: # property is in the old changes list; leave it so it is not reapplied
                pass
        #end loop
        # clear record about all changes and store only changed descriptors list
        self.setChangedProperties(changed_descriptors)

        # Walk through the complex properties first and then through simple properties
        for key,val in iteritems(sorted_param.copy()):
            # complex properties may change through their dependencies so we are setting them first
            public_name = self.is_complex_property(key, val)

            if public_name not in old_changes_list:
                exception_raised = self.update_property_value(val, public_name, param_list)
                if exception_raised:
                    continue
            else:
                pass
            # Dependencies are removed whether the properties are equal or not,
            # and also when the public_name of the property is in the old changes list: remove dependencies
            # too, as the property has been set up as a whole.
            try:
                dependencies = val.dependencies()
#pylint: disable=bare-except
            except:
                dependencies = []
            for dep_name in dependencies:
                # delete dependent properties not to deal with them again
                del sorted_param[dep_name]
        #end

        all_changes = self.retrieve_all_changes(old_changes, ignore_changes, old_changes_list)

        num_changes = funcinspect.lhs_info('nreturns')
        if num_changes > 0:
            return all_changes
        else:
            return None
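
The sort key above orders properties by the ord value of their first character, so underscore-prefixed composite properties (ord('_') == 95) come before alphabetic ones; a runnable illustration with invented property names:

from collections import OrderedDict

param_list = {'det_cal_file': 'none', '_mono_correction': 1.0, 'ei': 80}
sorted_param = OrderedDict(sorted(list(param_list.items()),
                                  key=lambda x: ord(x[0][0].lower())))
print(list(sorted_param))  # ['_mono_correction', 'det_cal_file', 'ei']

Example #8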
    def run_reduction(self):
        """" Reduces runs one by one or sum all them together and reduce after this

            if wait_for_file time is > 0, it will until  missing files appear on the
            data search path
        """
        class time_logger:
            """" helper class to run using Python 'with' statement
                 and log reduction execution time and optionally
                 time spent on reducing every input file.
            """
            def __init__(self,filename):
                "filename -- short name of the log file"
                self._start_time = time.time()
                self._tick_time = self._start_time
                filepath = os.path.dirname(os.path.realpath(__file__))
                self._log_file = os.path.join(filepath,filename)
                self.fh = -1
            def __enter__(self):
                self.fh = open(self._log_file, "w")
                lt = time.localtime(self._start_time)
                res_memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
                ch_memory = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss / 1024
                self.fh.write("*** Test started:  {0}/{1}/{2} at {3}:{4}\n".format(lt.tm_mday, lt.tm_mon, lt.tm_year, lt.tm_hour, lt.tm_min))
                self.fh.write("*** Self Memory: {0}Kb; Kids memory {1}Kb\n".format(res_memory, ch_memory))
                pv = subprocess.check_output(['free', '-m']).decode()  # decode bytes so split works under Python 3
                pvs = pv.split('\n')
                self.fh.write("***      {0}\n".format(pvs[0]))
                self.fh.write("***      {0}\n".format(pvs[1]))
                self.fh.write("***      {0}\n".format(pvs[2]))

                self.fh.flush()
                return self
            def __exit__(self, type, value, traceback):
                fin_time = time.time()
                lt = time.localtime(fin_time)
                self.fh.write("*** Test finished: {0}/{1}/{2} at {3}:{4}\n".format(lt.tm_mday, lt.tm_mon, lt.tm_year, lt.tm_hour, lt.tm_min))
                self.fh.write("*** Total execution time: {0:.2f}(sec)\n".format(fin_time - self._start_time))
                self.fh.close()
            def tick(self, fileID):
                start_time = self._tick_time
                end_time = time.time()
                res_memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
                ch_memory = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss / 1024
                self.fh.write("*** ---> File {0} processed in {1:.2f}sec\n".format(fileID, end_time - start_time))
                self.fh.write("*** Self Memory: {0}Kb; Kids memory {1}Kb\n".format(math.ceil(res_memory), math.ceil(ch_memory)))
                try:
                    pv = subprocess.check_output(['free', '-m']).decode()  # decode bytes so split works under Python 3
                    pvs = pv.split('\n')
                    self.fh.write("***      {0}\n".format(pvs[1]))
                    self.fh.write("***      {0}\n".format(pvs[2]))
                except:
                    #ClearCache(True,True,True,True,True,True,True)
                    #self.fh.write("***      Can not launch subprocess to evaluate free memory. Clearing all Mantid Caches\n")
                    self.fh.write("***      Cannot launch subprocess to evaluate free memory.\n")

                self.fh.flush()
                self._tick_time = end_time
                
        try:
            _,r = funcinspect.lhs_info('both')
            out_ws_name = r[0]
        # no exception type(s) specified; who knows what exception this internal procedure raises...
        #pylint: disable=W0702
        except:
            out_ws_name = None
        host = platform.node()
        host = host.replace('.','_')
        host = host.replace('-','_')
        inst  = config.getInstrument()
        count = 1
        inst_name = inst.name()
        log_file_name = "{0}_performance_{1}_test{2}.txt".format(inst_name, host, count)
        filepath = os.path.dirname(os.path.realpath(__file__))
        ff = os.path.join(filepath, log_file_name)
        while os.path.isfile(ff):
            count = count + 1
            log_file_name = "{0}_performance_{1}_test{2}.txt".format(inst_name, host, count)
            ff = os.path.join(filepath, log_file_name)

        print(' ******************* storing performance data to file: ', log_file_name)
        if self.reducer.sum_runs:
# --------### sum runs provided ------------------------------------###
            with time_logger(log_file_name) as log:
                if out_ws_name is None:
                    self.sum_and_reduce()
                    return None
                else:
                    red_ws = self.sum_and_reduce()
                    RenameWorkspace(InputWorkspace=red_ws,OutputWorkspace=out_ws_name)
                    return mtd[out_ws_name]
        else:
# --------### reduce list of runs one by one ----------------------------###
            runfiles = PropertyManager.sample_run.get_run_file_list()
            with time_logger(log_file_name) as log:            
                if out_ws_name is None:
                    for file_name in runfiles:
                        self.reduce(file_name)
                        log.tick(file_name)
                    return None
                else:
                    results = []
                    nruns = len(runfiles)
                    for num,file_name in enumerate(runfiles):
                        red_ws = self.reduce(file_name)
                        log.tick(file_name)
                        if isinstance(red_ws,list):
                            for ws in red_ws:
                                results.append(ws)
                        else:
                            if nruns == 1:
                                if red_ws.name() != out_ws_name:
                                    RenameWorkspace(InputWorkspace=red_ws,OutputWorkspace=out_ws_name)
                                results.append(mtd[out_ws_name])
                            else:
                                OutWSName = '{0}#{1}of{2}'.format(out_ws_name,num+1,nruns)
                                if red_ws.name() != OutWSName:
                                    RenameWorkspace(InputWorkspace=red_ws,OutputWorkspace=OutWSName)
                                results.append(mtd[OutWSName])
                #end
                if len(results) == 1:
                    return results[0]
                else:
                    return results
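
The time_logger helper follows the standard context-manager protocol; a stripped-down, runnable sketch of the same idiom (class name and message are illustrative):

import time

class SimpleTimer(object):
    """Minimal stand-in for the time_logger pattern above."""
    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print('elapsed: {0:.2f}s'.format(time.time() - self._start))

with SimpleTimer():
    sum(x * x for x in range(10 ** 6))  # the work being timed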
Example #9
    def iliad_wrapper(*args):
        #seq = inspect.stack()
        # output workspace name.
        try:
            _,r = funcinspect.lhs_info('both')
            out_ws_name = r[0]
        # no exception type(s) specified; who knows what exception this internal procedure raises...
        #pylint: disable=W0702
        except:
            out_ws_name = None

        args = list(args)  # copy to a list: a tuple would make the input-file substitution below fail silently
        host = args[0]
        if len(args) > 1:
            input_file = args[1]
            if len(args) > 2:
                output_directory = args[2]
            else:
                output_directory = None
        else:
            input_file = None
            output_directory = None
        # add input file folder to data search directory if file has it
        if input_file and isinstance(input_file,str):
            data_path = os.path.dirname(input_file)
            if len(data_path) > 0:
                try:
                    config.appendDataSearchDir(str(data_path))
                    args[1] = os.path.basename(input_file)
                #pylint: disable=bare-except
                except: # if mantid is not available, this should ignore config
                    pass
        if output_directory:
            config['defaultsave.directory'] = str(output_directory)

        #pylint: disable=protected-access
        if host._run_from_web:
            #pylint: disable=protected-access
            web_vars = host._wvs.get_all_vars()
            host.reducer.prop_man.set_input_parameters(**web_vars)
        else:
            pass # variables are assumed to have been set up already

        custom_print_function = host.set_custom_output_filename()
        if custom_print_function is not None:
            PropertyManager.save_file_name.set_custom_print(custom_print_function)
        #
        rez = reduce(*args)

        # prohibit returning workspace to web services.
        #pylint: disable=protected-access
        if host._run_from_web and not isinstance(rez,str):
            rez = ""
        else:
            if isinstance(rez, list):
                # multirep run, just return as it is
                return rez
            if rez is not None and out_ws_name and rez.name() != out_ws_name:
                # the function does not return None, pylint is wrong
                #pylint: disable=W1111
                rez = RenameWorkspace(InputWorkspace=rez, OutputWorkspace=out_ws_name)

        return rez
Example #10
    def op_wrapper(self, other):
        # Get the result variable to know what to call the output
        result_info = lhs_info()
        # Pass off to helper
        return _do_binary_operation(algorithm, self, other, result_info,
                                    inplace, reverse)
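
lhs_info inspects the calling frame to discover how many variables stand on the left-hand side of the assignment and what they are called; a usage sketch, assuming the module ships with Mantid as mantid.kernel.funcinspect:

from mantid.kernel import funcinspect

def named_result():
    names = funcinspect.lhs_info('names')     # assignment target names
    n_out = funcinspect.lhs_info('nreturns')  # number of targets
    return 'bound to {0!r} ({1} target(s))'.format(
        names[0] if names else '__anonymous', n_out)

result = named_result()  # lhs_info sees the target name 'result'
print(result)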
Example #11
def arb_units(wb_run,sample_run,ei_guess,rebin,map_file='default',monovan_run=None,second_wb=None,**kwargs):
    """ One step conversion of run into workspace containing information about energy transfer
    Usage:
    >>arb_units(wb_run,sample_run,ei_guess,rebin)

    >>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)

    >>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)

    >>arb_units(wb_run   Whitebeam run number or file name or workspace
                sample_run  sample run number or file name or workspace
                ei_guess    Ei guess
                rebin       Rebin parameters
                mapfile     Mapfile -- if absent/'default' the defaults from IDF are used
                monovan_run If present will do the absolute units normalization. A number of additional parameters
                            specified in **kwargs is usually required for this. If they are absent, the program uses defaults,
                            but the defaults (e.g. sample_mass or sample_rmm) are usually incorrect for a particular run.
                arguments   The dictionary containing additional keyword arguments.
                            The list of allowed additional arguments is defined in InstrName_Parameters.xml file, located in
                            MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions

    with run numbers as input:
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70])  # will run on default instrument

    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)

    >>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)

    A detector calibration file must be specified if running the reduction with workspaces as input
    namely:
    >>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file
               ,diag_remove_zero=False,norm_method='current')


    type help() for the list of all available keywords. All available keywords are provided in the InstName_Parameters.xml file


    Some examples are:
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range          =[20,40] in mev

    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white     - If provided an additional set of tests is performed on this. (default = None)
    hardmaskPlus     - A file specifying those spectra that should be masked without testing (default=None)
    tiny             - Minimum threshold for acceptance (default = 1e-10)
    large            - Maximum threshold for acceptance (default = 1e10)
    bkgd_range       - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo      - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi      - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo           - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi           - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig  - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero                - If true then zeroes in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo  - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi  - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig                   - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                                      difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation       - The number of medians the ratio of the first/second white beam can deviate from
                      the average by (default=1.1)
    bleed_test      - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate   - If the bleed test is on then this is the maximum framerate allowed in a tube
    bleed_pixels    - If the bleed test is on then this is the number of pixels ignored within the
                       bleed test diagnostic
    print_results - If True then the results are printed to the screen

    diag_remove_zero =True, False (default) :diag zero counts in background range
    bleed=True : turn bleed correction on or off; on by default for Merlin and LET

    sum =True,False(default) , sum multiple files

    det_cal_file= a valid detector block file and path or a raw file. Setting this
                  will use the detector calibration from the specified file NOT the
                  input raw file
    mask_run = RunNumber to use for diag instead of the input run number

    one2one =True, False :Reduction will not use a mapping file

    hardmaskPlus=Filename :load a hardmask file and apply together with diag mask

    hardmaskOnly=Filename :load a hardmask and use it as the only mask

    """
    global Reducer
    if Reducer is None or Reducer.instrument is None:
        raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
# --------------------------------------------------------------------------------------------------------
#    Deal with mandatory parameters for this and may be some top level procedures
# --------------------------------------------------------------------------------------------------------
    if sample_run:
        Reducer.sample_run = sample_run
        sample_run = None
    try:
        n, r = funcinspect.lhs_info('both')
        wksp_out = r[0]
    except:
        wksp_out = "reduced_ws"
    #
    res = Reducer.convert_to_energy(wb_run,sample_run,ei_guess,rebin,map_file,monovan_run,second_wb,**kwargs)
    #
    results_name = res.name()
    if results_name != wksp_out:
        RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)

    return res
Example #12
def abs_units(wb_for_run,sample_run,monovan_run,wb_for_monovanadium,samp_rmm,samp_mass,
              ei_guess,rebin,map_file='default',monovan_mapfile='default',**kwargs):
    """
    dgreduce.abs_units(wb_run          Whitebeam run number or file name or workspace
                  sample_run          Sample run run number or file name or workspace
                  monovan_run          Monochromatic run run number or file name or workspace
                  wb_mono          White beam for Monochromatic run run number or file name or workspace
                  samp_rmm          Mass of formula unit of sample
                  samp_mass          Actual sample mass
                  ei_guess          Ei guess of run
                  rebin          Rebin parameters for output data
                  map_file          Mapfile for sample run
                  monovan_mapfile     Mapfile for mono van run
                  keyword arguments     Any specified additional keyword arguments

    Example with run numbers
    abs_units(11001,11002,11003,10098,250.1,5.2,80,'-10,.1,75','mari_res','mari_res')

    A detector calibration file must be specified if running the reduction with workspace inputs

    Example with workspace inputs

    abs_units('wb_run','sam_run','mono_run','wb_for_mono',250.1,5.2,80,'-10,.1,75','mari_res','mari_res',
                   det_cal_file=10001,diag_remove_zero=False,norm_method='current')


    A detector calibration file must be specified if running the reduction with workspace inputs
    Available keywords
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range          =[20,40] in mev

    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white    - If provided an additional set of tests is performed on this. (default = None)
    hard_mask_file       - A file specifying those spectra that should be masked without testing (default=None)
    tiny            - Minimum threshold for acceptance (default = 1e-10)
    large           - Maximum threshold for acceptance (default = 1e10)
    bkgd_range      - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo   - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi   - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo        - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi        - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig  - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero                - If true then zeros in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo  - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi  - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig                   - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                                      difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation       - The number of medians the ratio of the first/second white beam can deviate from
                      the average by (default=1.1)
    bleed_test      - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate   - If the bleed test is on then this is the maximum frame rate allowed in a tube
    bleed_pixels    - If the bleed test is on then this is the number of pixels ignored within the
                    bleed test diagnostic
    print_results - If True then the results are printed to the screen

    diag_remove_zero =True, False (default):Diag zero counts in background range

    bleed=True : turn bleed correction on or off; on by default for Merlin and LET

    sum =True,False(default) , sum multiple files

    det_cal_file= a valid detector block file and path or a raw file. Setting this
                     will use the detector calibration from the specified file NOT the
                     input raw file
    mask_run = RunNumber to use for diag instead of the input run number

    one2one =True, False :Reduction will not use a mapping file

    hardmaskPlus=Filename :load a hardmask file and apply together with diag mask

    hardmaskOnly=Filename :load a hardmask and use as only mask

    use_sam_msk_on_monovan=False This will set the total mask to be that of the sample run

    abs_units_van_range=[-40,40] integral range for absolute vanadium data

    mono_correction_factor=float User specified correction factor for absolute units normalization
    """

    kwargs['monovan_mapfile']    = monovan_mapfile
    kwargs['sample_mass']        = samp_mass
    kwargs['sample_rmm']         = samp_rmm

    if sample_run:
        Reducer.sample_run = sample_run
        sample_run = None

    try:
        n, r = funcinspect.lhs_info('both')
        results_name = r[0]
    except:
        results_name = Reducer.prop_man.get_sample_ws_name()
    if runs_are_equal(wb_for_run, wb_for_monovanadium):  # the wb_for_monovanadium property does not accept a duplicated workspace
        wb_for_monovanadium = None  # if this value is None, it is constructed to be equal to wb_for_run

    wksp_out = arb_units(wb_for_run,sample_run,ei_guess,rebin,map_file,monovan_run,wb_for_monovanadium,**kwargs)

    if results_name != wksp_out.name():
        RenameWorkspace(InputWorkspace=wksp_out,OutputWorkspace=results_name)

    return wksp_out
Ejemplo n.º 16
0
def abs_units(wb_for_run,sample_run,monovan_run,wb_for_monovanadium,samp_rmm,samp_mass,
              ei_guess,rebin,map_file='default',monovan_mapfile='default',**kwargs):
    """
    dgreduce.abs_units(wb_run          Whitebeam run number or file name or workspace
                  sample_run          Sample run run number or file name or workspace
                  monovan_run          Monochromatic run run number or file name or workspace
                  wb_mono          White beam for Monochromatic run run number or file name or workspace
                  samp_rmm          Mass of formula unit of sample
                  samp_mass          Actual sample mass
                  ei_guess          Ei guess of run
                  rebin          Rebin parameters for output data
                  map_file          Mapfile for sample run
                  monovan_mapfile     Mapfile for mono van run
                  keyword arguments     Any specified additional keyword arguments

    Example with run numbers
    abs_units(11001,11002,11003,10098,250.1,5.2,80,'-10,.1,75','mari_res','mari_res')

    A detector calibration file must be specified if running the reduction with workspace inputs

    Example with workspace inputs

    abs_units('wb_run','sam_run','mono_run','wb_for_mono',250.1,5.2,80,'-10,.1,75','mari_res','mari_res',
                   det_cal_file=10001,diag_remove_zero=False,norm_method='current')


    A detector calibration file must be specified if running the reduction with workspace inputs
    Available keywords
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range          =[20,40] in mev

    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white    - If provided an additional set of tests is performed on this. (default = None)
    hard_mask_file       - A file specifying those spectra that should be masked without testing (default=None)
    tiny            - Minimum threshold for acceptance (default = 1e-10)
    large           - Maximum threshold for acceptance (default = 1e10)
    bkgd_range      - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo   - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi   - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo        - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi        - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig  - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero                - If true then zeros in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo  - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi  - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig                   - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the"
                                      difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation       -The number of medians the ratio of the first/second white beam can deviate from
                    the average by (default=1.1)
    bleed_test      - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate   - If the bleed test is on then this is the maximum frame rate allowed in a tube
    bleed_pixels    - If the bleed test is on then this is the number of pixels ignored within the
                    bleed test diagnostic
    print_results - If True then the results are printed to the screen

    diag_remove_zero =True, False (default):Diag zero counts in background range

    bleed=True , turn bleed correction on and off on by default for Merlin and LET

    sum =True,False(default) , sum multiple files

    det_cal_file= a valid detector block file and path or a raw file. Setting this
                     will use the detector calibration from the specified file NOT the
                     input raw file
    mask_run = RunNumber to use for diag instead of the input run number

    one2one =True, False :Reduction will not use a mapping file

    hardmaskPlus=Filename :load a hardmarkfile and apply together with diag mask

    hardmaskOnly=Filename :load a hardmask and use as only mask

    use_sam_msk_on_monovan=False This will set the total mask to be that of the sample run

    abs_units_van_range=[-40,40] integral range for absolute vanadium data

    mono_correction_factor=float User specified correction factor for absolute units normalization
    """

    kwargs['monovan_mapfile']    = monovan_mapfile
    kwargs['sample_mass']        = samp_mass
    kwargs['sample_rmm']         = samp_rmm

    if sample_run:
        Reducer.sample_run = sample_run
        sample_run = None

    try:
        n,r=funcinspect.lhs_info('both')
        results_name=r[0]
    except:
        results_name = Reducer.prop_man.get_sample_ws_name()
    if runs_are_equal(wb_for_run,wb_for_monovanadium): # the wb_for_monovanadium property does not accept a duplicated workspace
        wb_for_monovanadium = None        # if this value is None, it is constructed to be equal to wb_for_run

    wksp_out = arb_units(wb_for_run,sample_run,ei_guess,rebin,map_file,monovan_run,wb_for_monovanadium,**kwargs)

    if  results_name != wksp_out.name():
        RenameWorkspace(InputWorkspace=wksp_out,OutputWorkspace=results_name)

    return wksp_out
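The wrapper above normalizes its arguments and forwards everything to arb_units, so a call only needs the mandatory runs plus whatever keywords the docstring lists. A minimal usage sketch, assuming this is dgreduce's abs_units wrapper; the instrument, run numbers, map files and sample values are placeholders for illustration only:

import dgreduce

dgreduce.setup('MAR')                        # hypothetical instrument setup
w_abs = dgreduce.abs_units(11001,            # white beam for the sample run
                           11002,            # sample run
                           11003,            # monochromatic vanadium run
                           None,             # white beam for monovan (defaults to the sample one)
                           50.94,            # sample RMM
                           10.0,             # sample mass, grams
                           80,               # incident energy guess, meV
                           [-10, 0.1, 70],   # rebin parameters
                           'mari_res.map',   # mapping file
                           'mono_map.map',   # monovan mapping file
                           diag_remove_zero=False)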
Example #17
    def update_defaults_from_instrument(self,pInstrument,ignore_changes=False):
        """ Method used to update default parameters from the same instrument (with different parameters).

            Used if initial parameters correspond to instrument with one validity dates and
            current instrument has different validity dates and different default values for
            these dates.

            List of synonims is not modified and new properties are not added assuming that
            recent dictionary and properties are most comprehensive one

            ignore_changes==True when changes, caused by setting properties from instrument are not recorded
            ignore_changes==False -- getChangedProperties properties after applied this method would return set
                            of all properties changed when applying this method

        """
        self.reduction_instrument_warning(pInstrument)

        # Retrieve the properties, changed from interface earlier
        old_changes_list = self.getChangedProperties()

        old_changes = self.create_old_changes_dict(old_changes_list)

        param_list = prop_helpers.get_default_idf_param_list(pInstrument,self.__subst_dict)

        # remove old changes which are not related to IDF (not to reapply it again)
        old_changes = self.remove_non_IDF_changes(old_changes, param_list)

        param_list,descr_dict =  self._convert_params_to_properties(param_list,False,self.__descriptors)
        # clear record about previous changes
        self.setChangedProperties(set())

        # sort parameters so that complex properties (names starting with underscore '_') come first
        sorted_param =  OrderedDict(sorted(list(param_list.items()),key=lambda x : ord((x[0][0]).lower())))

        # Walk through the descriptors list and set their values.
        # Assignment to a descriptor should accept the form in which the descriptor is written in the IDF.
        changed_descriptors = set()
        for key,val in iteritems(descr_dict):
            if key not in old_changes_list:
                try: # reliability check; the except branch should ideally never be hit. It may trigger
                     # if the old IDF contains properties not present in the recent IDF.
                    cur_val = getattr(self,key)
                    setattr(self,key,val)
                    new_val = getattr(self,key)
#pylint: disable=bare-except
                except:
                    try:
                        cur_val = getattr(self,key)
#pylint: disable=bare-except
                    except:
                        cur_val = "Undefined"
                    self.log("Retrieving or reapplying script property {0} failed. Property value remains: {1}"
                             .format(key,cur_val),'warning')
                    continue

                new_val, cur_val = self.perform_simplfied_ws_comparison(new_val, cur_val)

                if new_val != cur_val:
                    changed_descriptors.add(key)
                    changed_throug_main = True
                else: # property may be changed through descriptors
                    changed_throug_main  = False

                # dependencies are removed whether the properties are equal or not
                try:
                    dependencies = getattr(PropertyManager,key).dependencies()
#pylint: disable=bare-except
                except:
                    dependencies = []

                for dep_name in dependencies:
                    if dep_name in sorted_param:
                        if changed_throug_main or (sorted_param[dep_name] == getattr(self,dep_name)):
                            del sorted_param[dep_name]
            else: # remove property from old changes list not to reapply it again?
                pass
        #end loop
        # clear record about all changes and store only changed descriptors list
        self.setChangedProperties(changed_descriptors)

        # Walk through the complex properties first and then through simple properties
        for key,val in iteritems(sorted_param.copy()):
            # complex properties may change through their dependencies so we are setting them first
            public_name = self.is_complex_property(key, val)

            if public_name not in old_changes_list:
                exception_raised = self.update_property_value(val, public_name, param_list)
                if exception_raised:
                    continue
            else:
                pass
            # Dependencies are removed whether the properties are equal or not,
            # and also when public_name is in the old changes list: remove the dependencies
            # too, as the property has been set up as a whole.
            try:
                dependencies = val.dependencies()
#pylint: disable=bare-except
            except:
                dependencies =[]
            for dep_name in dependencies:
                # delete dependent properties not to deal with them again
                del sorted_param[dep_name]
        #end

        all_changes = self.retrieve_all_changes(old_changes, ignore_changes, old_changes_list)

        num_changes = funcinspect.lhs_info('nreturns')
        if num_changes > 0:
            return all_changes
        else:
            return None
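The method ends with the funcinspect.lhs_info('nreturns') idiom: the change set is returned only when the caller actually assigned the result to something. A stripped-down sketch of the same pattern; report_changes is a hypothetical helper, not part of the class above:

from mantid.kernel import funcinspect

def report_changes(all_changes):
    # lhs_info('nreturns') inspects the calling frame and counts the variables on
    # the left-hand side of the assignment, so `ch = report_changes(...)` receives
    # the change set while a bare `report_changes(...)` call returns None.
    if funcinspect.lhs_info('nreturns') > 0:
        return all_changes
    return None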
Example #18
def op_wrapper(self, other):
    # Get the result variable to know what to call the output
    result_info = lhs_info()
    # Pass off to helper
    return _do_binary_operation(algorithm, self, other, result_info,
                                inplace, reverse)
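op_wrapper closes over algorithm, inplace and reverse, so a factory can stamp out one wrapper per arithmetic operator and bind it to a workspace class; lhs_info and _do_binary_operation are the Mantid internals used above. A hedged sketch of how such binding might look -- attach_binary_op and the class name are illustrative, not Mantid's actual API:

def attach_binary_op(ws_class, dunder_name, algorithm, inplace=False, reverse=False):
    """Bind a binary-operator wrapper, e.g. __add__, onto a workspace class."""
    def op_wrapper(self, other):
        result_info = lhs_info()   # name the output after the assignment target
        return _do_binary_operation(algorithm, self, other, result_info,
                                    inplace, reverse)
    setattr(ws_class, dunder_name, op_wrapper)

# e.g. attach_binary_op(MatrixWorkspace, '__add__', 'Plus')  # then ws3 = ws1 + ws2 runs Plus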
Example #19
def op_wrapper(self):
    # Get the result variable to know what to call the output
    result_info = lhs_info()
    # Pass off to helper
    return _do_unary_operation(algorithm, self, result_info)
Example #20
    def update_defaults_from_instrument(self,
                                        pInstrument,
                                        ignore_changes=False):
        """ Method used to update default parameters from the same instrument (with different parameters).

            Used if initial parameters correspond to instrument with one validity dates and
            current instrument has different validity dates and different default values for
            these dates.

            List of synonims is not modified and new properties are not added assuming that
            recent dictionary and properties are most comprehensive one

            ignore_changes==True when changes, caused by setting properties from instrument are not recorded
            ignore_changes==False -- getChangedProperties properties after applied this method would return set
                            of all properties changed when applying this method

        """
        if self.instr_name != pInstrument.getName():
            self.log(
                "*** WARNING: Setting reduction properties of the instrument {0} from the instrument {1}.\n"
                "*** This only works if both instruments have the same reduction properties!"
                .format(self.instr_name, pInstrument.getName()), 'warning')

        # Retrieve the properties changed from the interface earlier
        old_changes_list = self.getChangedProperties()
        # record all changes, present in the old changes list
        old_changes = OrderedDict()
        for prop_name in old_changes_list:
            old_changes[prop_name] = getattr(self, prop_name)

        param_list = prop_helpers.get_default_idf_param_list(
            pInstrument, self.__subst_dict)
        # remove old changes which are not related to IDF (not to reapply it again)
        for prop_name in old_changes.copy():
            if prop_name not in param_list:
                try:
                    dependencies = getattr(PropertyManager,
                                           prop_name).dependencies()
#pylint: disable=bare-except
                except:
                    dependencies = []
                modified = False
                for name in dependencies:
                    if name in param_list:
                        modified = True
                        # the old parameter has been modified through a compound parameter,
                        # so its old value is irrelevant
                        param_list[name] = getattr(self, name)
                if not modified:
                    del old_changes[prop_name]
        #end

        param_list, descr_dict = self._convert_params_to_properties(
            param_list, False, self.__descriptors)
        # clear record about previous changes
        self.setChangedProperties(set())

        # sort parameters so that complex properties (names starting with underscore '_') come first
        sorted_param = OrderedDict(
            sorted(list(param_list.items()),
                   key=lambda x: ord((x[0][0]).lower())))

        # Walk through the descriptors list and set their values.
        # Assignment to a descriptor should accept the form in which the descriptor is written in the IDF.
        changed_descriptors = set()
        for key, val in iteritems(descr_dict):
            if key not in old_changes_list:
                try:  # reliability check; the except branch should ideally never be hit. It may trigger
                    # if the old IDF contains properties not present in the recent IDF.
                    cur_val = getattr(self, key)
                    setattr(self, key, val)
                    new_val = getattr(self, key)
#pylint: disable=bare-except
                except:
                    try:
                        cur_val = getattr(self, key)
#pylint: disable=bare-except
                    except:
                        cur_val = "Undefined"
                    self.log(
                        "Retrieving or reapplying script property {0} failed. Property value remains: {1}"
                        .format(key, cur_val), 'warning')
                    continue
                if isinstance(new_val, api.Workspace) and isinstance(
                        cur_val, api.Workspace):
                    # do simplified workspace comparison which is appropriate here
                    if new_val.name() == cur_val.name() and \
                            new_val.getNumberHistograms() == cur_val.getNumberHistograms() and \
                            new_val.getNEvents() == cur_val.getNEvents() and \
                            new_val.getAxis(0).getUnit().unitID() == cur_val.getAxis(0).getUnit().unitID():
                        new_val = 1
                        cur_val = 1
                #
                #end
                if new_val != cur_val:
                    changed_descriptors.add(key)
                    changed_throug_main = True
                else:  # property may be changed through descriptors
                    changed_throug_main = False

                # dependencies are removed whether the properties are equal or not
                try:
                    dependencies = getattr(PropertyManager, key).dependencies()
#pylint: disable=bare-except
                except:
                    dependencies = []

                for dep_name in dependencies:
                    if dep_name in sorted_param:
                        if changed_throug_main or (sorted_param[dep_name]
                                                   == getattr(self, dep_name)):
                            del sorted_param[dep_name]
            else:  # remove property from old changes list not to reapply it again?
                pass
        #end loop
        # clear record about all changes and store only changed descriptors list
        self.setChangedProperties(changed_descriptors)

        # Walk through the complex properties first and then through simple properties
        for key, val in iteritems(sorted_param.copy()):
            # complex properties may change through their dependencies so we are setting them first
            if isinstance(val, prop_helpers.ComplexProperty):
                public_name = key[1:]
            else:
                # no complex properties left so we have simple key-value pairs
                public_name = key
            if public_name not in old_changes_list:
                if isinstance(val, prop_helpers.ComplexProperty):
                    prop_idf_val = val.__get__(param_list)
                else:
                    prop_idf_val = val

                try:  # reliability check; the except branch should ideally never be hit. It may trigger
                    # if the old IDF contains properties not present in the recent IDF.
                    cur_val = getattr(self, public_name)
#pylint: disable=bare-except
                except:
                    self.log(
                        "Can not retrieve property {0} value from existing reduction parameters. Ignoring this property"
                        .format(public_name), 'warning')
                    continue

                if prop_idf_val != cur_val:
                    setattr(self, public_name, prop_idf_val)
            else:
                pass
            # Dependencies are removed whether the properties are equal or not,
            # and also when public_name is in the old changes list: remove the dependencies
            # too, as the property has been set up as a whole.
            try:
                dependencies = val.dependencies()
#pylint: disable=bare-except
            except:
                dependencies = []
            for dep_name in dependencies:
                # delete dependent properties not to deal with them again
                del sorted_param[dep_name]
        #end

        new_changes_list = self.getChangedProperties()
        self.setChangedProperties(set())
        # set back all changes stored earlier and may be overwritten by new IDF
        # (this is just to be sure -- should not change anything as we do not set properties changed)
        for key, val in iteritems(old_changes):
            setattr(self, key, val)

        # Clear the changed-properties list (is this wise? Maybe we want to know that some defaults changed?)
        if ignore_changes:
            self.setChangedProperties(old_changes_list)
            all_changes = old_changes
        else:
            # the two unions in the original collapse to a single one
            all_changes = new_changes_list.union(old_changes_list)
            self.setChangedProperties(all_changes)

        n = funcinspect.lhs_info('nreturns')
        if n > 0:
            return all_changes
        else:
            return None
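The inline workspace test above compares cheap metadata rather than the data arrays. Factored out as a standalone helper (a sketch; the name workspaces_look_equal is not part of the class), the check reads:

def workspaces_look_equal(ws1, ws2):
    """Simplified workspace equality: same name, histogram count, event count and X-unit."""
    return (ws1.name() == ws2.name()
            and ws1.getNumberHistograms() == ws2.getNumberHistograms()
            and ws1.getNEvents() == ws2.getNEvents()
            and ws1.getAxis(0).getUnit().unitID() == ws2.getAxis(0).getUnit().unitID())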
Example #21
def arb_units(wb_run,sample_run,ei_guess,rebin,map_file='default',monovan_run=None,second_wb=None,**kwargs):
    """ One step conversion of run into workspace containing information about energy transfer
    Usage:
    >>arb_units(wb_run,sample_run,ei_guess,rebin)

    >>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)

    >>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)

    >>arb_units(wb_run   Whitebeam run number or file name or workspace
                sample_run  sample run number or file name or workspace
                ei_guess    Ei guess
                rebin       Rebin parameters
                mapfile     Mapfile -- if absent/'default' the defaults from IDF are used
                monovan_run If present, absolute units normalization will be performed. A number of
                            additional parameters specified in **kwargs are usually required for this. If they
                            are absent, the program uses defaults, but the defaults (e.g. sample_mass or
                            sample_rmm) are usually incorrect for a particular run.
                arguments   The dictionary containing additional keyword arguments.
                            The list of allowed additional arguments is defined in InstrName_Parameters.xml file, located in
                            MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions

    with run numbers as input:
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70])  # will run on default instrument

    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)

    >>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)

    A detector calibration file must be specified if running the reduction with workspaces as input
    namely:
    >>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file,
               diag_remove_zero=False,norm_method='current')


    Type help() for the list of all available keywords. All available keywords are provided in the InstrName_Parameters.xml file


    Some samples are:
    norm_method = [monitor-1], [monitor-2], [Current]
    background  = False, True
    fixei       = False, True
    save_format = ['.spe'], ['.nxspe'], 'none'
    detector_van_range          = [20,40] in meV

    bkgd_range  = [15000,19000] : integration range for background tests

    second_white     - If provided, an additional set of tests is performed on this workspace (default = None)
    hardmaskPlus     - A file specifying those spectra that should be masked without testing (default=None)
    tiny             - Minimum threshold for acceptance (default = 1e-10)
    large            - Maximum threshold for acceptance (default = 1e10)
    bkgd_range       - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo      - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi      - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo           - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi           - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig  - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero                - If true then zeroes in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo  - Fraction of median to consider counting low for the sample diag (default = 0)
    diag_samp_samp_median_sigma_hi  - Fraction of median to consider counting high for the sample diag (default = 2.0)
    diag_samp_sig                   - Error criterion as a multiple of error bar i.e. to fail the test, the magnitude of the
                                      difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation       - The number of medians the ratio of the first/second white beam can deviate from
                      the average by (default=1.1)
    bleed_test      - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate   - If the bleed test is on then this is the maximum framerate allowed in a tube
    bleed_pixels    - If the bleed test is on then this is the number of pixels ignored within the
                       bleed test diagnostic
    print_results - If True then the results are printed to the screen

    diag_remove_zero = True, False (default) : diagnose zero counts in the background range
    bleed = True : turn the bleed correction on or off; on by default for Merlin and LET

    sum = True, False (default) : sum multiple files

    det_cal_file = a valid detector block file and path or a raw file. Setting this
                   will use the detector calibration from the specified file, NOT the
                   input raw file
    mask_run = RunNumber to use for diag instead of the input run number

    one2one = True, False : reduction will not use a mapping file

    hardmaskPlus = Filename : load a hard mask file and apply it together with the diag mask

    hardmaskOnly = Filename : load a hard mask file and use it as the only mask

    """
    global Reducer
    if Reducer is None or Reducer.instrument is None:
        raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
# --------------------------------------------------------------------------------------------------------
#    Deal with mandatory parameters for this and may be some top level procedures
# --------------------------------------------------------------------------------------------------------
    if sample_run:
        Reducer.sample_run = sample_run
        sample_run = None
    try:
        n,r=funcinspect.lhs_info('both')
        wksp_out=r[0]
    except Exception: # no assignment target found: fall back to a default name
        wksp_out = "reduced_ws"
    #
    res = Reducer.convert_to_energy(wb_run,sample_run,ei_guess,rebin,map_file,monovan_run,second_wb,**kwargs)
    #
    results_name = res.name()
    if results_name != wksp_out:
        RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)

    return res
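The try/except around funcinspect.lhs_info('both') is the recurring trick that names the reduced workspace after the variable it is assigned to. A stripped-down sketch of that idiom; run_reduction stands in for Reducer.convert_to_energy and is hypothetical:

from mantid.kernel import funcinspect
from mantid.simpleapi import RenameWorkspace

def my_arb_units(*args, **kwargs):
    try:
        _, lhs_names = funcinspect.lhs_info('both')
        target = lhs_names[0]       # e.g. 'w1' for `w1 = my_arb_units(...)`
    except Exception:               # no assignment target could be inferred
        target = 'reduced_ws'
    res = run_reduction(*args, **kwargs)   # hypothetical reduction step
    if res.name() != target:
        res = RenameWorkspace(InputWorkspace=res, OutputWorkspace=target)
    return res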
Example #22
def op_wrapper(self):
    # Get the result variable to know what to call the output
    result_info = lhs_info()
    # Pass off to helper
    return _do_unary_operation(algorithm, self, result_info)