def do_background_test(background_int, median_lo, median_hi, sigma, mask_zero,
                       start_index=None, end_index=None):
    """
    Run the background tests

    Required inputs:
      background_int - An integrated workspace
      median_lo      - Fraction of median to consider counting low
      median_hi      - Fraction of median to consider counting high
      sigma          - Error criterion as a multiple of error bar
      mask_zero      - If True, zero background counts will be considered a fail
    """
    logger.notice('Running background count test')

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_background_test'

    mask_bkgd, num_failures = MedianDetectorTest(InputWorkspace=background_int,
                                                 StartWorkspaceIndex=start_index,
                                                 EndWorkspaceIndex=end_index,
                                                 SignificanceTest=sigma,
                                                 LowThreshold=median_lo, HighThreshold=median_hi,
                                                 LowOutlier=0.0, HighOutlier=1e100,
                                                 ExcludeZeroesFromMedian=True)
    # TODO: this looks like a hack - why does the algorithm return a negative value?
    return mask_bkgd, abs(num_failures)
def do_background_test(background_int, median_lo, median_hi, sigma, mask_zero,
                       start_index=None, end_index=None):
    """
    Run the background tests

    Required inputs:
      background_int - An integrated workspace
      median_lo      - Fraction of median to consider counting low
      median_hi      - Fraction of median to consider counting high
      sigma          - Error criterion as a multiple of error bar
      mask_zero      - If True, zero background counts will be considered a fail
    """
    logger.notice('Running background count test')

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_background_test'

    mask_bkgd, num_failures = MedianDetectorTest(InputWorkspace=background_int,
                                                 StartWorkspaceIndex=start_index,
                                                 EndWorkspaceIndex=end_index,
                                                 SignificanceTest=sigma,
                                                 LowThreshold=median_lo, HighThreshold=median_hi,
                                                 LowOutlier=0.0, HighOutlier=1e100,
                                                 ExcludeZeroesFromMedian=True)
    return mask_bkgd, num_failures
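# --- Hedged usage sketch (not part of the original source) ------------------
# Assumes a Mantid environment where Integration and the helpers above are
# importable; 'sample_ws' is a hypothetical, already-loaded workspace and the
# thresholds are only illustrative.
def _example_background_test(sample_ws):
    bkgd_int = Integration(sample_ws, RangeLower=15000, RangeUpper=19000,
                           IncludePartialBins=True)
    # lhs_info('names') inside do_background_test sees ('bkgd_mask',), so the
    # mask workspace is registered under the caller's variable name.
    bkgd_mask, n_failed = do_background_test(bkgd_int, median_lo=0.1, median_hi=1.5,
                                             sigma=3.3, mask_zero=False)
    return bkgd_mask, n_failed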
def do_bleed_test(sample_run, max_framerate, ignored_pixels):
    """Runs the CreatePSDBleedMask algorithm

    Input:
      sample_run     - The run number of the sample
      max_framerate  - The maximum allowed framerate in a tube. If None, the
                       instrument defaults are used.
      ignored_pixels - The number of central pixels to ignore. If None, the
                       instrument defaults are used.
    """
    logger.notice('Running PSD bleed test')
    # Load the sample run
    data_ws = common.load_run(sample_run)

    if max_framerate is None:
        max_framerate = float(data_ws.getInstrument().getNumberParameter('max-tube-framerate')[0])
    if ignored_pixels is None:
        ignored_pixels = int(data_ws.getInstrument().getNumberParameter('num-ignored-pixels')[0])
    else:
        # Make sure it is an int
        ignored_pixels = int(ignored_pixels)

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_bleed__test'

    bleed_test, num_failed = CreatePSDBleedMask(InputWorkspace=data_ws,
                                                OutputWorkspace=ws_name,
                                                MaxTubeFramerate=max_framerate,
                                                NIgnoredCentralPixels=ignored_pixels)
    return bleed_test, num_failed
def do_bleed_test(sample_run, max_framerate, ignored_pixels):
    """Runs the CreatePSDBleedMask algorithm

    Input:
      sample_run     - The run number of the sample
      max_framerate  - The maximum allowed framerate in a tube. If None, the
                       instrument defaults are used.
      ignored_pixels - The number of central pixels to ignore. If None, the
                       instrument defaults are used.
    """
    # NOTE: should be deployed on a non-normalized workspace only!
    logger.notice('Running PSD bleed test')
    # Load the sample run
    if __Reducer__:
        # Try to use the generic loader, which works with files and workspaces alike
        sample_run = __Reducer__.get_run_descriptor(sample_run)
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = data_ws.name() + '_bleed'
    else:
        # maybe sample_run is already a run descriptor even though __Reducer__ has not been exposed
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = data_ws.name() + '_bleed'

    if max_framerate is None:  # get defaults
        max_framerate = float(data_ws.getInstrument().getNumberParameter('max-tube-framerate')[0])
    if ignored_pixels is None:  # get defaults
        ignored_pixels = int(data_ws.getInstrument().getNumberParameter('num-ignored-pixels')[0])
    else:
        # Make sure it is an int
        ignored_pixels = int(ignored_pixels)

    # What shall we call the output
    # NB: this overrides the ws_name derived above whenever a LHS name exists
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_bleed__test'

    # Check that all necessary logs are present in the workspace, as nxs workspace
    # log names differ from raw-file workspace logs.
    try:
        nFrames = data_ws.getRun().getLogData('goodfrm').value
    except RuntimeError:
        try:
            nFrames = data_ws.getRun().getLogData('good_frames').lastValue()
            AddSampleLog(Workspace=data_ws, LogName='goodfrm',
                         LogText=str(int(nFrames)), LogType='Number')
        except RuntimeError:
            raise RuntimeError("Bleed test failed as no appropriate 'good_frames' or 'goodfrm' log is loaded with ws: {0}\n"
                               "Disable the bleed test by setting diag_bleed_test=False or add a 'goodfrm' log to the workspace\n"
                               .format(data_ws.name()))

    bleed_test, num_failed = CreatePSDBleedMask(InputWorkspace=data_ws,
                                                OutputWorkspace=ws_name,
                                                MaxTubeFramerate=max_framerate,
                                                NIgnoredCentralPixels=ignored_pixels)
    return bleed_test, num_failed
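# --- Hedged usage sketch (not part of the original source) ------------------
# 'run_no' is a placeholder sample run; None for both limits picks up the
# instrument defaults ('max-tube-framerate', 'num-ignored-pixels') from the IDF.
def _example_bleed_test(run_no):
    # For the variant above, the run must carry a 'goodfrm' or 'good_frames'
    # log; the mask workspace picks up the LHS name 'bleed_mask'.
    bleed_mask, n_failed = do_bleed_test(run_no, max_framerate=None,
                                         ignored_pixels=None)
    return bleed_mask, n_failed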
def iliad_wrapper(*args):
    #seq = inspect.stack()
    # output workspace name.
    try:
        _, r = funcreturns.lhs_info('both')
        out_ws_name = r[0]
    except Exception:
        out_ws_name = None

    host = args[0]
    args = list(args)  # *args is a tuple; copy to a list so the file path can be replaced below
    if len(args) > 1:
        input_file = args[1]
        if len(args) > 2:
            output_directory = args[2]
        else:
            output_directory = None
    else:
        input_file = None
        output_directory = None
    # add the input file folder to the data search directories if the file has one
    if input_file and isinstance(input_file, str):
        data_path = os.path.dirname(input_file)
        if len(data_path) > 0:
            try:
                config.appendDataSearchDir(str(data_path))
                args[1] = os.path.basename(input_file)
            except Exception:
                # if mantid is not available, this should ignore config
                pass
    if output_directory:
        config['defaultsave.directory'] = str(output_directory)

    #pylint: disable=protected-access
    if host._run_from_web:
        #pylint: disable=protected-access
        web_vars = host._wvs.get_all_vars()
        host.reducer.prop_man.set_input_parameters(**web_vars)
    else:
        pass  # the variables should already be set up

    custom_print_function = host.set_custom_output_filename()
    if custom_print_function is not None:
        PropertyManager.save_file_name.set_custom_print(custom_print_function)
    #
    rez = reduce(*args)

    # prohibit returning a workspace to web services.
    #pylint: disable=protected-access
    if host._run_from_web and not isinstance(rez, str):
        rez = ""
    else:
        if isinstance(rez, list):
            # multirep run, just return as it is
            return rez
        if rez is not None and out_ws_name and rez.name() != out_ws_name:
            # the function does not return None, pylint is wrong
            #pylint: disable=W1111
            rez = RenameWorkspace(InputWorkspace=rez, OutputWorkspace=out_ws_name)

    return rez
def sqw(self, wksp_in, qbin):
    """ convert to S(mod Q, w); assumes direct geometry
        requires a string of rebin parameters, e.g. sqw(w1, '0,.1,12')
    """
    n, r = funcreturns.lhs_info('both')
    wksp_out = r[0]
    #ei = (wksp_in.getRun().getLogData("Ei").value)
    #wksp_in = mtd[wksp_in]
    # NB: Mantid property names are case sensitive; the original passed
    # 'OutputWorkSpace', which is corrected to 'OutputWorkspace' here
    SofQW3(InputWorkspace=wksp_in, OutputWorkspace=wksp_out, QAxisBinning=qbin, EMode="Direct")
    ## the commented lines below were code for correcting the intersecting-area
    ## rebin as coded in SofQW2; SofQW3 seems to work correctly
    #CloneWorkspace(InputWorkspace=wksp_in,OutputWorkspace='tmp')
    #CreateSingleValuedWorkspace(OutputWorkspace='scale',DataValue='0',ErrorValue='0')
    #Multiply(LHSWorkspace='tmp',RHSWorkspace='scale',OutputWorkspace='tmp')
    #CreateSingleValuedWorkspace(OutputWorkspace='scale2',DataValue='1',ErrorValue='0')
    #Plus(LHSWorkspace='tmp',RHSWorkspace='scale2',OutputWorkspace='tmp')
    #SofQW3(InputWorkspace='tmp',OutputWorkspace='tmp',QAxisBinning=qbin,EMode='Direct')
    #SetUncertainties(InputWorkSpace='tmp',OutputWorkSpace='tmp')
    #Divide(LHSWorkspace=wksp_out,RHSWorkspace='tmp',OutputWorkspace=wksp_out)
    #DeleteWorkspace('tmp')
    #DeleteWorkspace('scale')
    #DeleteWorkspace('scale2')
    return mtd[wksp_out]
def transpose(wksp_in):
    """ transpose workspace """
    n, r = funcreturns.lhs_info('both')
    wksp_out = r[0]
    Transpose(InputWorkspace=wksp_in, OutputWorkspace=wksp_out)
    return mtd[wksp_out]
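# --- Hedged sketch (not part of the original source) ------------------------
# The pattern above recurs throughout these helpers, so a brief sketch of what
# funcreturns.lhs_info provides: it inspects the calling frame and reports how
# many values the caller's assignment expects plus the variable names on its
# left-hand side, which is how these functions name their output workspaces.
def _example_lhs_info():
    from mantid.kernel.funcreturns import lhs_info
    # When called as `out = _example_lhs_info()`, lhs_info('both') returns
    # (1, ('out',)), so an output workspace can be registered under the
    # caller's own variable name.
    n_returns, names = lhs_info('both')
    if n_returns > 0:
        return names[0]
    return '__example_temp'  # fallback when there is no assignment target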
def sqwfast(self, wksp_in, qbin):
    """ convert to S(mod Q, w); assumes direct geometry
        requires a string of rebin parameters, e.g. sqw(w1, '0,.1,12')
    """
    try:
        n, r = funcreturns.lhs_info('both')
        wksp_out = r[0]
        # the line computing 'ei' was commented out in the original, leaving
        # EFixed undefined; restore it from the run log as the sibling sqw() does
        ei = wksp_in.getRun().getLogData("Ei").value
        SofQW(wksp_in, OutputWorkspace=wksp_out, QAxisBinning=qbin,
              EMode="Direct", EFixed=str(ei))
        return mtd[wksp_out]
    except Exception:
        print('no output workspace defined')
def iliad_wrapper(*args):
    #seq = inspect.stack()
    # output workspace name.
    try:
        n, r = funcreturns.lhs_info('both')
        out_ws_name = r[0]
    except Exception:
        out_ws_name = None

    host = args[0]
    args = list(args)  # *args is a tuple; copy to a list so the file path can be replaced below
    if len(args) > 1:
        input_file = args[1]
        if len(args) > 2:
            output_directory = args[2]
        else:
            output_directory = None
    else:
        input_file = None
        output_directory = None
    # add the input file folder to the data search directories if the file has one
    if input_file and isinstance(input_file, str):
        data_path = os.path.dirname(input_file)
        if len(data_path) > 0:
            try:
                config.appendDataSearchDir(str(data_path))
                args[1] = os.path.basename(input_file)
            except Exception:
                # if mantid is not available, this should ignore config
                pass
    if output_directory:
        config['defaultsave.directory'] = output_directory

    if host._run_from_web:
        web_vars = dict(list(host._wvs.standard_vars.items()) +
                        list(host._wvs.advanced_vars.items()))
        host.reducer.prop_man.set_input_parameters(**web_vars)
    else:
        pass  # the variables should already be set up

    rez = reduce(*args)

    # prohibit returning a workspace to web services.
    if host._run_from_web and not isinstance(rez, str):
        rez = ""
    else:
        if isinstance(rez, list):
            # multirep run, just return as it is
            return rez
        if out_ws_name and rez.name() != out_ws_name:
            rez = RenameWorkspace(InputWorkspace=rez, OutputWorkspace=out_ws_name)

    return rez
def do_second_white_test(white_counts, comp_white_counts, tiny, large, out_lo, out_hi,
                         median_lo, median_hi, sigma, variation,
                         start_index=None, end_index=None):
    """
    Run an additional set of tests against a second white beam count workspace,
    comparing it with the first.

    Required inputs:
      white_counts      - A workspace containing the integrated counts from a
                          white beam vanadium run
      comp_white_counts - A workspace containing the integrated counts from a
                          second white beam vanadium run
      tiny       - Minimum threshold for acceptance
      large      - Maximum threshold for acceptance
      median_lo  - Fraction of median to consider counting low
      median_hi  - Fraction of median to consider counting high
      sigma      - Counts within this number of multiples of the standard dev will be kept
      variation  - Defines a range within which the ratio of the two counts is
                   allowed to fall, in terms of the number of medians
    """
    logger.notice('Running second white beam test')

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_second_white_test'

    # Make sure we are a MatrixWorkspace
    white_counts = ConvertToMatrixWorkspace(InputWorkspace=white_counts,
                                            OutputWorkspace=white_counts)
    comp_white_counts = ConvertToMatrixWorkspace(InputWorkspace=comp_white_counts,
                                                 OutputWorkspace=comp_white_counts)

    # Do the white beam test
    __second_white_tests, failed = do_white_test(comp_white_counts, tiny, large,
                                                 median_lo, median_hi, sigma,
                                                 start_index, end_index)
    # and now compare it with the first
    effic_var, num_failed = DetectorEfficiencyVariation(WhiteBeamBase=white_counts,
                                                        WhiteBeamCompare=comp_white_counts,
                                                        OutputWorkspace=ws_name,
                                                        Variation=variation,
                                                        StartWorkspaceIndex=start_index,
                                                        EndWorkspaceIndex=end_index)

    DeleteWorkspace(Workspace=str(__second_white_tests))
    # Mask those that failed
    maskWS = effic_var
    MaskDetectors(Workspace=white_counts, MaskedWorkspace=maskWS)
    MaskDetectors(Workspace=comp_white_counts, MaskedWorkspace=maskWS)

    return maskWS, num_failed
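# --- Hedged usage sketch (not part of the original source) ------------------
# 'white_int' and 'white_int2' are hypothetical integrated white-beam vanadium
# workspaces; the thresholds echo the defaults documented elsewhere in this
# module (tiny=1e-10, large=1e10, outliers 0.01/100., medians 0.1/1.5,
# sigma=0.0, variation=1.1).
def _example_second_white_test(white_int, white_int2):
    # The comparison mask picks up the LHS name 'second_white_mask'.
    second_white_mask, n_failed = do_second_white_test(white_int, white_int2,
                                                       1e-10, 1e10, 0.01, 100.,
                                                       0.1, 1.5, 0.0, 1.1)
    return second_white_mask, n_failed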
def sqw(wksp_in, qbin):
    """ convert to S(mod Q, w); assumes direct geometry
        requires a string of rebin parameters, e.g. sqw(w1, '0,.1,12')
    """
    try:
        n, r = funcreturns.lhs_info('both')
        wksp_out = r[0]
        ei = wksp_in.getRun().getLogData("Ei").value
        SofQW2(wksp_in, OutputWorkspace=wksp_out, QAxisBinning=qbin,
               EMode="Direct", EFixed=str(ei))
        Transpose(InputWorkspace=wksp_out, OutputWorkspace=wksp_out)
        return mtd[wksp_out]
    except Exception:
        print('no output workspace defined')
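# --- Hedged usage sketch (not part of the original source) ------------------
# 'w1' is a hypothetical energy-transfer workspace carrying an 'Ei' sample log.
def _example_sqw(w1):
    # The result lands in the ADS under the LHS name 'w1_sqw', which lhs_info
    # recovers inside sqw().
    w1_sqw = sqw(w1, '0,0.1,12')
    return w1_sqw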
def calculate_resolution(input_data, mass, index=0):
    """
    Run the VesuvioResolution function to produce a workspace
    with the value of the Vesuvio resolution.

    @param input_data The original TOF data
    @param mass The mass defining the recoil peak in AMU
    @param index An optional index to specify the spectrum to use
    """
    import mantid.simpleapi
    from mantid.api import AlgorithmManager, AnalysisDataService
    from mantid.kernel.funcreturns import lhs_info

    # Grab the name of the variable that this function call is assigned to
    try:
        output_name = lhs_info("names")[0]
    except IndexError:
        # No variable specified
        name_stem = str(input_data)
        output_name = name_stem + "_res" + str(index)

    function = "name=VesuvioResolution, Mass=%f" % mass

    # Execute the resolution function using Fit.
    # Functions can't currently be executed as stand-alone objects,
    # so for now we run Fit with zero iterations to achieve the same result.
    fit = AlgorithmManager.createUnmanaged('Fit')
    fit.initialize()
    fit.setChild(True)
    fit.setLogging(False)
    mantid.simpleapi._set_properties(fit, function, input_data, MaxIterations=0,
                                     CreateOutput=True, Output=output_name,
                                     WorkspaceIndex=index)
    fit.execute()
    values_ws = fit.getProperty("OutputWorkspace").value

    # Extract just the function values: spectrum 1 of the Fit output holds
    # the calculated function
    extract = AlgorithmManager.createUnmanaged('ExtractSingleSpectrum')
    extract.initialize()
    extract.setChild(True)
    extract.setLogging(False)
    extract.setProperty("InputWorkspace", values_ws)
    extract.setProperty("OutputWorkspace", "__unused_for_child")
    extract.setProperty("WorkspaceIndex", 1)
    extract.execute()

    calculated = extract.getProperty("OutputWorkspace").value
    AnalysisDataService.addOrReplace(output_name, calculated)

    return calculated
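# --- Hedged usage sketch (not part of the original source) ------------------
# 'tof_ws' is a hypothetical TOF workspace; 1.0079 amu (hydrogen) is only an
# illustrative mass for the recoil peak.
def _example_resolution(tof_ws):
    # The resolution workspace is registered in the ADS as 'res_ws' via lhs_info.
    res_ws = calculate_resolution(tof_ws, mass=1.0079, index=0)
    return res_ws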
def QCut(self, Emin, Emax, Qmin, delQ, Qmax, **kwargs):
    # The original was a deeply nested ladder of has_key() checks that also
    # called the non-existent kwargs.get_key('Handle') in one branch, and
    # whose bare except re-ran the cut unnamed if anything failed. This
    # version keeps the same behaviour: the cut is named after the caller's
    # LHS variable when there is one, 'over'/'shoelace' are passed through as
    # flags, and 'Handle' is forwarded only alongside 'over'.
    cut_kwargs = {}
    try:
        n, r = funcreturns.lhs_info('both')
        cut_kwargs['cutName'] = r[0]
    except Exception:
        pass  # no assignment target - run the cut unnamed
    if 'shoelace' in kwargs:
        cut_kwargs['shoelace'] = True
    if 'over' in kwargs:
        cut_kwargs['over'] = True
        if 'Handle' in kwargs:
            cut_kwargs['Handle'] = kwargs.get('Handle')
    self.cut(self.data, Emin, Emax, Qmin, delQ, Qmax, along='q', **cut_kwargs)
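# --- Hedged usage sketch (not part of the original source) ------------------
# 'cuts' is a hypothetical instance holding self.data. QCut returns nothing;
# the assignment exists purely so lhs_info can label the cut workspace.
def _example_qcut(cuts):
    # Integrate energy transfer between 5 and 10 meV and cut along |Q| from
    # 0 to 10 in 0.05 steps; the cut is stored under the name 'my_q_cut'.
    my_q_cut = cuts.QCut(5, 10, 0, 0.05, 10)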
def cutMatplotlib(self, direction, intMin, intMax, minX, delX, maxX, **kwargs):
    # keywords: wkspout = name of the output workspace
    #           wkspIn  = input data
    # deal with the call type and where to get the output workspace name from
    if 'wkspout' in kwargs:
        cut_name = kwargs.get('wkspout')
    else:
        n, r = funcreturns.lhs_info('both')
        cut_name = r[0]

    if direction == 'x':
        if 'wkspIn' in kwargs:
            wkspinName = kwargs.get('wkspIn')
            Rebin2D(InputWorkspace=wkspinName, OutputWorkspace=cut_name,
                    Axis1Binning=str(minX) + ',' + str(delX) + ',' + str(maxX),
                    Axis2Binning=str(intMin) + ',' + str(intMax - intMin) + ',' + str(intMax),
                    UseFractionalArea='1')
        else:
            Rebin2D(InputWorkspace=self.data, OutputWorkspace=cut_name,
                    Axis1Binning=str(minX) + ',' + str(delX) + ',' + str(maxX),
                    Axis2Binning=str(intMin) + ',' + str(intMax - intMin) + ',' + str(intMax),
                    UseFractionalArea='1')
    if direction == 'y':
        # NB: the original checks 'wkspin' here but 'wkspIn' in the x branch;
        # the inconsistent capitalisation is preserved in case callers rely on it
        if 'wkspin' in kwargs:
            wkspinName = kwargs.get('wkspin')
            Rebin2D(InputWorkspace=wkspinName, OutputWorkspace=cut_name,
                    Axis1Binning=str(intMin) + ',' + str(intMax - intMin) + ',' + str(intMax),
                    Axis2Binning=str(minX) + ',' + str(delX) + ',' + str(maxX),
                    UseFractionalArea='1')
            Transpose(InputWorkspace=cut_name, OutputWorkspace=cut_name)
        else:
            Rebin2D(InputWorkspace=self.data, OutputWorkspace=cut_name,
                    Axis1Binning=str(intMin) + ',' + str(intMax - intMin) + ',' + str(intMax),
                    Axis2Binning=str(minX) + ',' + str(delX) + ',' + str(maxX),
                    UseFractionalArea='1')
            Transpose(InputWorkspace=cut_name, OutputWorkspace=cut_name)

    ReplaceSpecialValues(InputWorkspace=cut_name, OutputWorkspace=cut_name,
                         NaNValue='0', InfinityValue='0')
    cutDataWorkSpace = mtd[cut_name]
    evals = cutDataWorkSpace.extractE()
    xvals = cutDataWorkSpace.extractX()
    yvals = cutDataWorkSpace.extractY()
    NewXvals = (xvals + numpy.roll(xvals, -1)) / 2  # calculate the bin centres
    NewXvals = numpy.delete(NewXvals, -1)  # remove the last element, which is junk
    yvals = yvals[0]
    evals = evals[0]
    output = [NewXvals, yvals, evals]
    return output
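# --- Hedged usage sketch (not part of the original source) ------------------
# 'plotter' is a hypothetical instance whose self.data holds a 2D workspace.
def _example_cut(plotter):
    # x-direction cut: integrate the second axis between 0 and 2 and bin the
    # first axis from -10 to 10 in 0.1 steps; returns bin centres, values, errors.
    xvals, yvals, evals = plotter.cutMatplotlib('x', 0, 2, -10, 0.1, 10)
    return xvals, yvals, evals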
def do_bleed_test(sample_run, max_framerate, ignored_pixels):
    """Runs the CreatePSDBleedMask algorithm

    Input:
      sample_run     - The run number of the sample
      max_framerate  - The maximum allowed framerate in a tube. If None, the
                       instrument defaults are used.
      ignored_pixels - The number of central pixels to ignore. If None, the
                       instrument defaults are used.
    """
    # NOTE: this was deployed on the loaded workspace and now works on the
    # normalized workspace. Is this acceptable?
    logger.notice('Running PSD bleed test')
    # Load the sample run
    if __Reducer__:
        # Try to use the generic loader, which works with files and workspaces alike
        sample_run = __Reducer__.get_run_descriptor(sample_run)
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = sample_run.get_ws_name() + '_bleed'
    else:
        # maybe sample_run is already a run descriptor even though __Reducer__ has not been exposed
        data_ws = sample_run.get_workspace()  # this will load data if necessary
        ws_name = sample_run.get_ws_name() + '_bleed'

    if max_framerate is None:
        max_framerate = float(data_ws.getInstrument().getNumberParameter('max-tube-framerate')[0])
    if ignored_pixels is None:
        ignored_pixels = int(data_ws.getInstrument().getNumberParameter('num-ignored-pixels')[0])
    else:
        # Make sure it is an int
        ignored_pixels = int(ignored_pixels)

    # What shall we call the output
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        ws_name = lhs_names[0]
    else:
        ws_name = '__do_bleed__test'

    bleed_test, num_failed = CreatePSDBleedMask(InputWorkspace=data_ws,
                                                OutputWorkspace=ws_name,
                                                MaxTubeFramerate=max_framerate,
                                                NIgnoredCentralPixels=ignored_pixels)
    return bleed_test, num_failed
def diagnose(self, white, **kwargs):
    """
    Run diagnostics on the provided workspaces.

    This method does some additional processing before moving on to the diagnostics:
      1) Computes the white beam integrals, converting to energy
      2) Computes the background integral using the instrument defined range
      3) Computes a total count from the sample

    These inputs are passed to the diagnostics functions

    Required inputs:
      white  - A workspace, run number or filepath of a white beam run. A workspace is
               assumed to have simply been loaded and nothing else.

    Optional inputs:
      sample - A workspace, run number or filepath of a sample run. A workspace is
               assumed to have simply been loaded and nothing else. (default = None)
      second_white - If provided an additional set of tests is performed on this. (default = None)
      hard_mask    - A file specifying those spectra that should be masked without testing (default=None)
      tiny         - Minimum threshold for acceptance (default = 1e-10)
      huge         - Maximum threshold for acceptance (default = 1e10)
      bkgd_range   - A list of two numbers indicating the background range (default=instrument defaults)
      van_out_lo   - Lower bound defining outliers as fraction of median value (default = 0.01)
      van_out_hi   - Upper bound defining outliers as fraction of median value (default = 100.)
      van_lo       - Fraction of median to consider counting low for the white beam diag (default = 0.1)
      van_hi       - Fraction of median to consider counting high for the white beam diag (default = 1.5)
      van_sig      - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude
                     of the difference with respect to the median value must also exceed this number
                     of error bars (default=0.0)
      samp_zero    - If true then zeroes in the vanadium data will count as failed (default = True)
      samp_lo      - Fraction of median to consider counting low for the white beam diag (default = 0)
      samp_hi      - Fraction of median to consider counting high for the white beam diag (default = 2.0)
      samp_sig     - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude
                     of the difference with respect to the median value must also exceed this number
                     of error bars (default=3.3)
      variation    - The number of medians the ratio of the first/second white beam can deviate from
                     the average by (default=1.1)
      bleed_test   - If true then the CreatePSDBleedMask algorithm is run
      bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
      bleed_pixels - If the bleed test is on then this is the number of pixels ignored within the
                     bleed test diagnostic
      print_results - If True then the results are printed to the screen
    """
    lhs_names = funcreturns.lhs_info('names')
    if len(lhs_names) > 0:
        var_name = lhs_names[0]
    else:
        var_name = "diag_mask"

    # Check for any keywords that have not been supplied and put in the defaults
    for par in self.diag_params:
        # NB: str.lstrip('diag_') would strip any leading 'd','i','a','g','_'
        # characters, not the prefix, so remove the prefix explicitly
        arg = par.replace('diag_', '', 1)
        if arg not in kwargs:
            kwargs[arg] = getattr(self, par)

    # Get the white beam vanadium integrals
    whiteintegrals = self.do_white(white, None, None, None)  # No grouping yet
    if 'second_white' in kwargs:
        second_white = kwargs['second_white']
        if second_white is None:
            del kwargs['second_white']
        else:
            other_whiteintegrals = self.do_white(second_white, None, None, None)  # No grouping yet
            kwargs['second_white'] = other_whiteintegrals

    # Get the background/total counts from the sample if present
    if 'sample' in kwargs:
        sample = kwargs['sample']
        del kwargs['sample']
        # If the bleed test is requested then we need to pass in the sample_run as well
        if kwargs.get('bleed_test', False):
            kwargs['sample_run'] = sample

        # Set up the background integrals
        result_ws = common.load_runs(sample)
        result_ws = self.normalise(result_ws, result_ws.name(), self.normalise_method)
        if 'bkgd_range' in kwargs:
            bkgd_range = kwargs['bkgd_range']
            del kwargs['bkgd_range']
        else:
            bkgd_range = self.background_range
        background_int = Integration(result_ws,
                                     RangeLower=bkgd_range[0], RangeUpper=bkgd_range[1],
                                     IncludePartialBins=True)
        total_counts = Integration(result_ws, IncludePartialBins=True)
        background_int = ConvertUnits(background_int, "Energy", AlignBins=0)
        background_int *= 1.7016e8
        diagnostics.normalise_background(background_int, whiteintegrals,
                                         kwargs.get('second_white', None))
        kwargs['background_int'] = background_int
        kwargs['sample_counts'] = total_counts

    # If we have a hard_mask, check the instrument name is defined
    if 'hard_mask' in kwargs:
        if 'instrument_name' not in kwargs:
            kwargs['instrument_name'] = self.instr_name

    # Check how we should run diag
    if self.diag_spectra is None:
        # Do the whole lot at once
        diagnostics.diagnose(whiteintegrals, **kwargs)
    else:
        banks = self.diag_spectra.split(";")
        bank_spectra = []
        for b in banks:
            token = b.split(",")  # b = "(start,end)"
            if len(token) != 2:
                raise ValueError("Invalid bank spectra specification in diag %s" % self.diag_spectra)
            start = int(token[0].lstrip('('))
            end = int(token[1].rstrip(')'))
            bank_spectra.append((start, end))

        for index, bank in enumerate(bank_spectra):
            kwargs['start_index'] = bank[0] - 1
            kwargs['end_index'] = bank[1] - 1
            diagnostics.diagnose(whiteintegrals, **kwargs)

    if 'sample_counts' in kwargs:
        DeleteWorkspace(Workspace='background_int')
        DeleteWorkspace(Workspace='total_counts')
    if 'second_white' in kwargs:
        DeleteWorkspace(Workspace=kwargs['second_white'])

    # Return a mask workspace
    diag_mask, det_ids = ExtractMask(InputWorkspace=whiteintegrals, OutputWorkspace=var_name)
    DeleteWorkspace(Workspace=whiteintegrals)
    self.spectra_masks = diag_mask
    return diag_mask
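# --- Hedged usage sketch (not part of the original source) ------------------
# 'reducer' is a hypothetical, already-configured reducer instance; the run
# numbers are placeholders.
def _example_diagnose(reducer):
    # The mask picks up the LHS name 'diag_mask' (also the fallback default).
    diag_mask = reducer.diagnose(11001, sample=11002,
                                 bkgd_range=[15000, 19000], print_results=True)
    return diag_mask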
def arb_units(wb_run, sample_run, ei_guess, rebin, map_file='default', monovan_run=None, **kwargs):
    """ One step conversion of a run into a workspace containing information about energy transfer
    Usage:
    >>arb_units(wb_run,sample_run,ei_guess,rebin)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)

    wb_run      Whitebeam run number or file name or workspace
    sample_run  sample run number or file name or workspace
    ei_guess    Ei guess
    rebin       Rebin parameters
    mapfile     Mapfile -- if absent/'default' the defaults from IDF are used
    monovan_run If present will do the absolute units normalization. A number of additional
                parameters specified in **kwargs is usually requested for this. If they are
                absent, the program uses defaults, but the defaults (e.g. sample_mass or
                sample_rmm) are usually incorrect for a particular run.
    arguments   The dictionary containing additional keyword arguments.
                The list of allowed additional arguments is defined in the InstrName_Parameters.xml
                file, located in MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions

    with run numbers as input:
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70])  # will run on default instrument
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
    >>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)

    A detector calibration file must be specified if running the reduction with workspaces
    as input, namely:
    >>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file,
               diag_remove_zero=False,norm_method='current')

    type help() for the list of all available keywords. All available keywords are provided
    in the InstName_Parameters.xml file.

    Some samples are:
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range =[20,40] in meV
    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white - If provided an additional set of tests is performed on this. (default = None)
    hardmaskPlus - A file specifying those spectra that should be masked without testing (default=None)
    tiny         - Minimum threshold for acceptance (default = 1e-10)
    large        - Maximum threshold for acceptance (default = 1e10)
    bkgd_range   - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                   difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero - If true then zeroes in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation     - The number of medians the ratio of the first/second white beam can deviate from the average by (default=1.1)
    bleed_test    - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
    bleed_pixels  - If the bleed test is on then this is the number of pixels ignored within the bleed test diagnostic
    print_results - If True then the results are printed to the screen
    diag_remove_zero =True, False (default): Diag zero counts in background range
    bleed =True, turn bleed correction on and off; on by default for Merlin and LET
    sum   =True, False (default): sum multiple files
    det_cal_file = a valid detector block file and path or a raw file. Setting this will use
                   the detector calibration from the specified file NOT the input raw file
    mask_run = RunNumber to use for diag instead of the input run number
    one2one  =True, False: reduction will not use a mapping file
    hardmaskPlus=Filename: load a hard mask file and apply together with diag mask
    hardmaskOnly=Filename: load a hard mask and use as the only mask
    """
    global Reducer
    if Reducer is None or Reducer.instrument is None:
        raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
    # --------------------------------------------------------------------------------------------------------
    # Deal with mandatory parameters for this and maybe some top level procedures
    # --------------------------------------------------------------------------------------------------------
    Reducer.log("****************************************************************")
    if isinstance(sample_run, api.Workspace) or (isinstance(sample_run, str) and sample_run in mtd):
        Reducer.log('*** DGreduce run for: {0:>20} : Workspace name: {1:<20} '.format(Reducer.instr_name, str(sample_run)))
    else:
        Reducer.log('*** DGreduce run for: {0:>20} : Run number/s : {1:<20} '.format(Reducer.instr_name, str(sample_run)))

    try:
        n, r = funcreturns.lhs_info('both')
        wksp_out = r[0]
    except Exception:
        if sample_run == 0:
            # deal with the current run being parsed as 0 rather than 00000
            sample_run = '00000'
        wksp_out = Reducer.instr_name + str(sample_run) + '.spe'
        if kwargs.get('sum', False):
            # NB: the original used the undefined name 'inst_name' here;
            # Reducer.instr_name is assumed to be the intent
            wksp_out = Reducer.instr_name + str(sample_run[0]) + 'sum' + '.spe'

    start_time = time.time()

    if sample_run == '00000' and mtd.doesExist(Reducer.instr_name + '00000.raw'):
        Reducer.log('Deleting previous instance of temp data')
        DeleteWorkspace(Workspace=Reducer.instr_name + '00000.raw')

    # we may want to run absolute units normalization and this function has been
    # called with a monovan run or a helper procedure
    abs_units_defaults_check = False
    if monovan_run is not None:
        # check if mono-vanadium is provided as a multiple-files list or just put in brackets occasionally
        Reducer.log("****************************************************************")
        Reducer.log('*** Output will be in absolute units of mb/str/mev/fu')
        if isinstance(monovan_run, list):
            if len(monovan_run) > 1:
                raise IOError(' Can currently work only with a single monovan file but a list was supplied')
            else:
                monovan_run = monovan_run[0]
        abs_units_defaults_check = True
        if '_defaults_have_changed' in kwargs:
            del kwargs['_defaults_have_changed']
            abs_units_defaults_check = False
    if "wb_for_monovanadium" in kwargs:
        wb_for_monovanadium = kwargs['wb_for_monovanadium']
        del kwargs['wb_for_monovanadium']
    else:
        wb_for_monovanadium = wb_run

    if isinstance(ei_guess, str):
        ei_guess = float(ei_guess)

    # set rebinning range
    Reducer.energy_bins = rebin
    Reducer.incident_energy = ei_guess
    if Reducer.energy_bins[2] > ei_guess:
        Reducer.log('Error: max rebin range {0:f} exceeds incident energy {1:f}'.format(Reducer.energy_bins[2], ei_guess), 'Error')
        return

    # Process old legacy parameters which are easier to re-define in dgreduce than transfer through Mantid
    program_args = process_legacy_parameters(**kwargs)

    # set non-default reducer parameters and check that all optional keys provided as
    # parameters are acceptable and have been defined in the IDF
    changed_Keys = Reducer.set_input_parameters(**program_args)

    # inform the user about changed parameters
    Reducer.log("*** Provisional Incident energy: {0:>12.3f} mEv".format(ei_guess))
    Reducer.log("****************************************************************")
    for key in changed_Keys:
        val = getattr(Reducer, key)
        Reducer.log("  Value of : {0:<25} is set to : {1:<20} ".format(key, val))

    save_dir = config.getString('defaultsave.directory')
    Reducer.log("****************************************************************")
    if monovan_run is not None and not ('van_mass' in changed_Keys or 'vanadium-mass' in changed_Keys):
        Reducer.log("*** Monochromatic vanadium mass used : {0} ".format(Reducer.van_mass))
    Reducer.log("*** By default results are saved into: {0}".format(save_dir))
    Reducer.log("****************************************************************")
    # if we run absolute units normalization, warn users if the parameters
    # needed for that have not changed from their defaults
    if abs_units_defaults_check:
        Reducer.check_abs_norm_defaults_changed(changed_Keys)

    # process complex parameters
    # a map file given in the parameters overrides the default map file
    if map_file != 'default':
        Reducer.map_file = map_file
    # defaults can be None too, but can be a file
    if Reducer.map_file is None:
        Reducer.log('one2one map selected')

    if Reducer.det_cal_file is not None:
        if isinstance(Reducer.det_cal_file, str) and Reducer.det_cal_file not in mtd:  # it is a file
            Reducer.log('Setting detector calibration file to ' + Reducer.det_cal_file)
        else:
            Reducer.log('Setting detector calibration to {0}, which is probably a workspace '.format(str(Reducer.det_cal_file)))
    else:
        Reducer.log('Setting detector calibration to detector block info from ' + str(sample_run))

    # check that the reducer can find all non-run files necessary for the reduction
    # before starting a long run
    Reducer.check_necessary_files(monovan_run)

    print('Output will be normalized to ' + str(Reducer.normalise_method))
    if (numpy.size(sample_run)) > 1 and Reducer.sum_runs:
        # this sums the runs together before passing the summed file to the rest of the reduction;
        # this circumvents the inbuilt method of summing, which fails to sum the files for diag.
        # The D.E.C. tries to be too clever, so we have to fool it into thinking the raw file
        # already exists as a workspace
        sumfilename = Reducer.instr_name + str(sample_run[0]) + '.raw'
        sample_run = sum_files(Reducer.instr_name, sumfilename, sample_run)
        common.apply_calibration(Reducer.instr_name, sample_run, Reducer.det_cal_file)
        #sample_run = RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')

    if Reducer.mask_run is None:
        mask_run = sample_run
    else:
        # NB: the original never set mask_run in this case, leaving it undefined
        # below; falling back to the configured run is assumed to be the intent
        mask_run = Reducer.mask_run

    masking = None
    masks_done = False
    if not Reducer.run_diagnostics:
        header = "Diagnostics including hard masking is skipped "
        masks_done = True
    if Reducer.save_and_reuse_masks:
        raise NotImplementedError("Save and reuse masks option is not yet implemented")
        # dead code kept from the original; the raise above makes it unreachable
        mask_file_name = common.create_resultname(str(mask_run), Reducer.instr_name, '_masks.xml')
        mask_full_file = FileFinder.getFullPath(mask_file_name)
        if len(mask_full_file) > 0:
            masking = LoadMask(Instrument=Reducer.instr_name, InputFile=mask_full_file,
                               OutputWorkspace=mask_file_name)
            #Reducer.hard_mask_file = mask_full_file
            #Reducer.use_hard_mask_only = True
            masks_done = True
            header = "Masking fully skipped and processed {0} spectra and {1} bad spectra "
        else:
            pass
    # -------------------------------------------------------------------------------------------------------
    # Here we give control to the Reducer
    # -------------------------------------------------------------------------------------------------------
    # diag the sample and detector vanadium. It will deal with hard mask only if it is set that way
    if not masks_done:
        print('########### Run diagnose for sample run ##############################')
        masking = Reducer.diagnose(wb_run, sample=mask_run, second_white=None, print_results=True)
        header = "Diag Processed workspace with {0:d} spectra and masked {1:d} bad spectra"

    # Calculate absolute units:
    if monovan_run is not None:
        if Reducer.mono_correction_factor is None:
            if Reducer.use_sam_msk_on_monovan:
                Reducer.log('  Applying sample run mask to mono van')
            else:
                if not Reducer.use_hard_mask_only:
                    # in this case masking2 is different but points to the same
                    # workspace. A better solution is needed for this.
                    print('########### Run diagnose for monochromatic vanadium run ##############')
                    # NB: the original passed the misspelled keyword 'rint_results'
                    masking2 = Reducer.diagnose(wb_for_monovanadium, sample=monovan_run,
                                                second_white=None, print_results=True)
                    masking += masking2
                    DeleteWorkspace(masking2)
        else:  # Reducer.mono_correction_factor is not None
            pass

    # save the mask if it does not exist and has been already loaded
    if Reducer.save_and_reuse_masks and not masks_done:
        SaveMask(InputWorkspace=masking, OutputFile=mask_file_name, GroupedDetectors=True)

    # Very important statement propagating masks for further usage in convert_to_energy
    Reducer.spectra_masks = masking
    # estimate and report the number of failing detectors
    failed_sp_list, nSpectra = get_failed_spectra_list_from_masks(masking)
    nMaskedSpectra = len(failed_sp_list)
    # this count is misleading in the hard-mask-only case, but everything else seems to work fine
    print(header.format(nSpectra, nMaskedSpectra))

    # Run the conversion first on the sample
    deltaE_wkspace_sample = Reducer.convert_to_energy(sample_run, ei_guess, wb_run)

    # calculate the absolute units integral and apply it to the workspace
    if monovan_run is not None or Reducer.mono_correction_factor is not None:
        deltaE_wkspace_sample = apply_absolute_normalization(Reducer, deltaE_wkspace_sample,
                                                             monovan_run, ei_guess, wb_run)
        # Hack for multirep
        #if isinstance(monovan_run,int):
        #    filename = common.find_file(monovan_run)
        #    output_name = common.create_dataname(filename)
        #    DeleteWorkspace(output_name)

    results_name = deltaE_wkspace_sample.name()
    if results_name != wksp_out:
        RenameWorkspace(InputWorkspace=results_name, OutputWorkspace=wksp_out)

    ei = (deltaE_wkspace_sample.getRun().getLogData("Ei").value)
    print('Incident energy found for sample run: {0} meV'.format(ei))

    end_time = time.time()
    print('Elapsed time = {0} s'.format(end_time - start_time))

    if mtd.doesExist('_wksp.spe-white'):
        DeleteWorkspace(Workspace='_wksp.spe-white')
    # Hack for multirep mode?
    if mtd.doesExist('hard_mask_ws'):
        DeleteWorkspace(Workspace='hard_mask_ws')

    return deltaE_wkspace_sample
def op_wrapper(self, other):
    # Get the result variable to know what to call the output
    result_info = lhs_info()
    # Pass off to helper
    return _do_binary_operation(algorithm, self, other, result_info,
                                inplace, reverse)
def op_wrapper(self):
    # Get the result variable to know what to call the output
    result_info = lhs_info()
    # Pass off to helper
    return _do_unary_operation(algorithm, self, result_info)
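# --- Hedged illustration (not part of the original source) ------------------
# The wrappers above are closures: 'algorithm' (and, for the binary case,
# 'inplace' and 'reverse') are captured from an enclosing routine that binds
# them to workspace operators. Assuming '__add__' has been bound to a binary
# wrapper for the 'Plus' algorithm, the effect is:
def _example_operator(ws1, ws2):
    # lhs_info() inside op_wrapper recovers the name 'summed', so the Plus
    # output is registered in the ADS under the caller's variable name rather
    # than a generated temporary.
    summed = ws1 + ws2
    return summed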
def abs_units(wb_for_run, sample_run, monovan_run, wb_for_monovanadium, samp_rmm, samp_mass,
              ei_guess, rebin, map_file='default', monovan_mapfile='default', **kwargs):
    """
    dgreduce.abs_units(wb_run          Whitebeam run number or file name or workspace
                       sample_run      Sample run number or file name or workspace
                       monovan_run     Monochromatic run number or file name or workspace
                       wb_mono         White beam for monochromatic run number or file name or workspace
                       samp_rmm        Mass of formula unit of sample
                       samp_mass       Actual sample mass
                       ei_guess        Ei guess of run
                       rebin           Rebin parameters for output data
                       map_file        Mapfile for sample run
                       monovan_mapfile Mapfile for mono van run
                       keyword arguments   Any specified additional keyword arguments

    Example with run numbers:
    abs_units(11001,11002,11003,10098,250.1,5.2,80,'-10,.1,75','mari_res','mari_res')

    A detector calibration file must be specified if running the reduction with workspace inputs.

    Example with workspace inputs:
    abs_units('wb_run','sam_run','mono_run','wb_for_mono',250.1,5.2,80,'-10,.1,75','mari_res','mari_res',
              det_cal_file=10001,diag_remove_zero=False,norm_method='current')

    Available keywords:
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range =[20,40] in meV
    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white   - If provided an additional set of tests is performed on this. (default = None)
    hard_mask_file - A file specifying those spectra that should be masked without testing (default=None)
    tiny           - Minimum threshold for acceptance (default = 1e-10)
    large          - Maximum threshold for acceptance (default = 1e10)
    bkgd_range     - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                   difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero - If true then zeros in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation     - The number of medians the ratio of the first/second white beam can deviate from the average by (default=1.1)
    bleed_test    - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate - If the bleed test is on then this is the maximum frame rate allowed in a tube
    bleed_pixels  - If the bleed test is on then this is the number of pixels ignored within the bleed test diagnostic
    print_results - If True then the results are printed to the screen
    diag_remove_zero =True, False (default): Diag zero counts in background range
    bleed =True, turn bleed correction on and off; on by default for Merlin and LET
    sum   =True, False (default): sum multiple files
    det_cal_file = a valid detector block file and path or a raw file. Setting this will use
                   the detector calibration from the specified file NOT the input raw file
    mask_run = RunNumber to use for diag instead of the input run number
    one2one  =True, False: reduction will not use a mapping file
    hardmaskPlus=Filename: load a hard mask file and apply together with diag mask
    hardmaskOnly=Filename: load a hard mask and use as the only mask
    use_sam_msk_on_monovan=False  This will set the total mask to be that of the sample run
    abs_units_van_range=[-40,40]  integral range for absolute vanadium data
    mono_correction_factor=float  User-specified correction factor for absolute units normalization
    """
    kwargs['monovan_mapfile'] = monovan_mapfile
    kwargs['sample_mass'] = samp_mass
    kwargs['sample_rmm'] = samp_rmm

    if sample_run:
        Reducer.sample_run = sample_run

    try:
        n, r = funcreturns.lhs_info('both')
        results_name = r[0]
    except Exception:
        results_name = Reducer.prop_man.get_sample_ws_name()

    if wb_for_run == wb_for_monovanadium:
        # the wb_for_monovanadium property does not accept a duplicated workspace;
        # if this value is None, it is constructed to be equal to wb_for_run
        wb_for_monovanadium = None

    wksp_out = arb_units(wb_for_run, sample_run, ei_guess, rebin, map_file,
                         monovan_run, wb_for_monovanadium, **kwargs)

    if results_name != wksp_out.getName():
        RenameWorkspace(InputWorkspace=wksp_out, OutputWorkspace=results_name)

    return wksp_out
def arb_units(wb_run, sample_run, ei_guess, rebin, map_file='default', monovan_run=None,
              second_wb=None, **kwargs):
    """ One step conversion of a run into a workspace containing information about energy transfer
    Usage:
    >>arb_units(wb_run,sample_run,ei_guess,rebin)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)

    wb_run      Whitebeam run number or file name or workspace
    sample_run  sample run number or file name or workspace
    ei_guess    Ei guess
    rebin       Rebin parameters
    mapfile     Mapfile -- if absent/'default' the defaults from IDF are used
    monovan_run If present will do the absolute units normalization. A number of additional
                parameters specified in **kwargs is usually requested for this. If they are
                absent, the program uses defaults, but the defaults (e.g. sample_mass or
                sample_rmm) are usually incorrect for a particular run.
    arguments   The dictionary containing additional keyword arguments.
                The list of allowed additional arguments is defined in the InstrName_Parameters.xml
                file, located in MantidPlot->View->Preferences->Mantid->Directories->Parameter Definitions

    with run numbers as input:
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70])  # will run on default instrument
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
    >>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)

    A detector calibration file must be specified if running the reduction with workspaces
    as input, namely:
    >>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file,
               diag_remove_zero=False,norm_method='current')

    type help() for the list of all available keywords. All available keywords are provided
    in the InstName_Parameters.xml file.

    Some samples are:
    norm_method =[monitor-1],[monitor-2][Current]
    background  =False , True
    fixei       =False , True
    save_format =['.spe'],['.nxspe'],'none'
    detector_van_range =[20,40] in meV
    bkgd_range  =[15000,19000]  :integration range for background tests

    second_white - If provided an additional set of tests is performed on this. (default = None)
    hardmaskPlus - A file specifying those spectra that should be masked without testing (default=None)
    tiny         - Minimum threshold for acceptance (default = 1e-10)
    large        - Maximum threshold for acceptance (default = 1e10)
    bkgd_range   - A list of two numbers indicating the background range (default=instrument defaults)
    diag_van_median_rate_limit_lo - Lower bound defining outliers as fraction of median value (default = 0.01)
    diag_van_median_rate_limit_hi - Upper bound defining outliers as fraction of median value (default = 100.)
    diag_van_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0.1)
    diag_van_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 1.5)
    diag_van_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                   difference with respect to the median value must also exceed this number of error bars (default=0.0)
    diag_remove_zero - If true then zeroes in the vanadium data will count as failed (default = True)
    diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the white beam diag (default = 0)
    diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the white beam diag (default = 2.0)
    diag_samp_sig - Error criterion as a multiple of error bar, i.e. to fail the test, the magnitude of the
                    difference with respect to the median value must also exceed this number of error bars (default=3.3)
    variation     - The number of medians the ratio of the first/second white beam can deviate from the average by (default=1.1)
    bleed_test    - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
    bleed_pixels  - If the bleed test is on then this is the number of pixels ignored within the bleed test diagnostic
    print_results - If True then the results are printed to the screen
    diag_remove_zero =True, False (default): Diag zero counts in background range
    bleed =True, turn bleed correction on and off; on by default for Merlin and LET
    sum   =True, False (default): sum multiple files
    det_cal_file = a valid detector block file and path or a raw file. Setting this will use
                   the detector calibration from the specified file NOT the input raw file
    mask_run = RunNumber to use for diag instead of the input run number
    one2one  =True, False: reduction will not use a mapping file
    hardmaskPlus=Filename: load a hard mask file and apply together with diag mask
    hardmaskOnly=Filename: load a hard mask and use as the only mask
    """
    global Reducer
    if Reducer is None or Reducer.instrument is None:
        raise ValueError("instrument has not been defined, call setup(instrument_name) first.")
    # --------------------------------------------------------------------------------------------------------
    # Deal with mandatory parameters for this and maybe some top level procedures
    # --------------------------------------------------------------------------------------------------------
    if sample_run:
        Reducer.sample_run = sample_run
    try:
        n, r = funcreturns.lhs_info('both')
        wksp_out = r[0]
    except Exception:
        wksp_out = Reducer.prop_man.get_sample_ws_name()
    #
    res = Reducer.convert_to_energy(wb_run, sample_run, ei_guess, rebin, map_file,
                                    monovan_run, second_wb, **kwargs)
    #
    results_name = res.name()
    if results_name != wksp_out:
        RenameWorkspace(InputWorkspace=results_name, OutputWorkspace=wksp_out)

    return res
def arb_units(wb_run, sample_run, ei_guess, rebin, map_file='default', monovan_run=None, second_wb=None, **kwargs):
    """ One-step conversion of a run into a workspace containing information about energy transfer

    Usage:
    >>arb_units(wb_run,sample_run,ei_guess,rebin)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,**arguments)
    >>arb_units(wb_run,sample_run,ei_guess,rebin,mapfile,**arguments)

    Parameters:
    wb_run       Whitebeam run number or file name or workspace
    sample_run   Sample run number or file name or workspace
    ei_guess     Ei guess
    rebin        Rebin parameters
    mapfile      Mapfile -- if absent/'default' the defaults from the IDF are used
    monovan_run  If present, the absolute units normalization will be performed. A number
                 of additional parameters, specified in **kwargs, is usually required for
                 this. If they are absent, the program uses defaults, but the defaults
                 (e.g. sample_mass or sample_rmm) are usually incorrect for a particular run.
    arguments    The dictionary containing additional keyword arguments. The list of
                 allowed additional arguments is defined in the InstrName_Parameters.xml
                 file, located in MantidPlot->View->Preferences->Mantid->Directories->
                 Parameter Definitions

    With run numbers as input:
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70])  # will run on the default instrument
    >>dgreduce.arb_units(1000,10001,80,[-10,.1,70],'mari_res', additional keywords as required)
    >>dgreduce.arb_units(1000,10001,80,'-10,.1,70','mari_res',fixei=True)

    A detector calibration file must be specified if running the reduction with workspaces
    as input, namely:
    >>w2=iliad("wb_wksp","run_wksp",ei,rebin_params,mapfile,det_cal_file=cal_file,
               diag_remove_zero=False,norm_method='current')

    Type help() for the list of all available keywords. All available keywords are provided
    in the InstName_Parameters.xml file.

    Some examples are:
    norm_method=[monitor-1],[monitor-2],[Current]
    background=False, True
    fixei=False, True
    save_format=['.spe'],['.nxspe'],'none'
    detector_van_range=[20,40] in meV
    bkgd_range=[15000,19000] : integration range for background tests

    second_white  - If provided, an additional set of tests is performed on this
                    workspace (default = None)
    hardmaskPlus  - A file specifying those spectra that should be masked without
                    testing (default=None)
    tiny          - Minimum threshold for acceptance (default = 1e-10)
    large         - Maximum threshold for acceptance (default = 1e10)
    bkgd_range    - A list of two numbers indicating the background range
                    (default=instrument defaults)
    diag_van_median_rate_limit_lo - Lower bound defining outliers as a fraction of the
                    median value (default = 0.01)
    diag_van_median_rate_limit_hi - Upper bound defining outliers as a fraction of the
                    median value (default = 100.)
    diag_van_median_sigma_lo - Fraction of median to consider counting low for the white
                    beam diag (default = 0.1)
    diag_van_median_sigma_hi - Fraction of median to consider counting high for the white
                    beam diag (default = 1.5)
    diag_van_sig  - Error criterion as a multiple of error bar, i.e. to fail the test,
                    the magnitude of the difference with respect to the median value must
                    also exceed this number of error bars (default=0.0)
    diag_remove_zero - If true then zeroes in the vanadium data will count as failed
                    (default = True)
    diag_samp_samp_median_sigma_lo - Fraction of median to consider counting low for the
                    sample diag (default = 0)
    diag_samp_samp_median_sigma_hi - Fraction of median to consider counting high for the
                    sample diag (default = 2.0)
    diag_samp_sig - Error criterion as a multiple of error bar, i.e. to fail the test,
                    the magnitude of the difference with respect to the median value must
                    also exceed this number of error bars (default=3.3)
    variation     - The number of medians by which the ratio of the first/second white beam
                    integrals can deviate from the average (default=1.1)
    bleed_test    - If true then the CreatePSDBleedMask algorithm is run
    bleed_maxrate - If the bleed test is on then this is the maximum framerate allowed in a tube
    bleed_pixels  - If the bleed test is on then this is the number of pixels ignored within
                    the bleed test diagnostic
    print_results - If True then the results are printed to the screen
    diag_remove_zero=True, False (default): diag zero counts in background range
    bleed=True, turn bleed correction on or off; on by default for Merlin and LET
    sum=True, False (default): sum multiple files
    det_cal_file= a valid detector block file and path or a raw file. Setting this will use
                  the detector calibration from the specified file NOT the input raw file
    mask_run = RunNumber to use for diag instead of the input run number
    one2one=True, False: reduction will not use a mapping file
    hardmaskPlus=Filename: load a hardmask file and apply together with the diag mask
    hardmaskOnly=Filename: load a hardmask and use as the only mask
    """
    global Reducer
    if Reducer is None or Reducer.instrument is None:
        raise ValueError("Instrument has not been defined, call setup(instrument_name) first.")
# --------------------------------------------------------------------------------------------------------
#  Deal with mandatory parameters for this and possibly some top-level procedures
# --------------------------------------------------------------------------------------------------------
    if sample_run:
        Reducer.sample_run = sample_run
    try:
        n, r = funcreturns.lhs_info('both')
        wksp_out = r[0]
    except:
        wksp_out = Reducer.prop_man.get_sample_ws_name()
    #
    res = Reducer.convert_to_energy(wb_run, sample_run, ei_guess, rebin, map_file,
                                    monovan_run, second_wb, **kwargs)
    #
    results_name = res.name()
    if results_name != wksp_out:
        RenameWorkspace(InputWorkspace=results_name, OutputWorkspace=wksp_out)
    return res
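# ---------------------------------------------------------------------------------------
# Hypothetical usage sketch for arb_units: the run numbers, map file name and keyword
# values below are placeholders, not real data; the keywords themselves are those
# documented in the docstring above. setup() must be called first, as the ValueError
# raised above requires.
# ---------------------------------------------------------------------------------------
# import dgreduce
# dgreduce.setup('MAR')                       # define the instrument first
# w1 = dgreduce.arb_units(11060, 11001, 80, [-10, 0.1, 70], 'mari_res',
#                         fixei=True, diag_remove_zero=False, norm_method='current')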
def QCut(self, Emin, Emax, Qmin, delQ, Qmax, **kwargs):
    """Cut the data along Q between Qmin and Qmax in steps of delQ,
    integrating the energy transfer axis between Emin and Emax."""
    intmin = Emin
    intmax = Emax
    cutmin = Qmin
    delcut = delQ
    cutmax = Qmax
    # Collect the optional arguments once instead of duplicating the self.cut call for
    # every combination of 'shoelace', 'over' and 'Handle'. Only the left-hand-side name
    # lookup is guarded by try/except, so errors raised by self.cut itself now propagate
    # instead of silently re-running the cut without a name. This also fixes the
    # kwargs.get_key('Handle') typo in the original (dicts have no get_key method).
    cut_kwargs = {}
    try:
        # name the cut after the variable it is assigned to, if any
        n, r = funcreturns.lhs_info('both')
        cut_kwargs['cutName'] = r[0]
    except:
        pass
    if 'shoelace' in kwargs:
        cut_kwargs['shoelace'] = True
    if 'over' in kwargs:
        cut_kwargs['over'] = True
        if 'Handle' in kwargs:
            cut_kwargs['Handle'] = kwargs.get('Handle')
    self.cut(self.data, intmin, intmax, cutmin, delcut, cutmax, along='q', **cut_kwargs)
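# ---------------------------------------------------------------------------------------
# Hypothetical usage of QCut, assuming `mp` is an instance of the class defining it and
# mp.data already holds a reduced 2D (energy transfer, Q) dataset; all numbers are
# placeholders. The cut is named after the variable on the left-hand side.
# ---------------------------------------------------------------------------------------
# c1 = mp.QCut(10, 20, 0.5, 0.02, 4.0)             # new plot; cut stored as 'c1'
# c2 = mp.QCut(10, 20, 0.5, 0.02, 4.0, over=True)  # overplot on the current figure
# c3 = mp.QCut(10, 20, 0.5, 0.02, 4.0, over=True, Handle=fig)  # overplot on figure `fig`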
def iliad_wrapper(*args):
    #seq = inspect.stack()
    # output workspace name.
    try:
        _, r = funcreturns.lhs_info('both')
        out_ws_name = r[0]
    # no exception type(s) specified. Who knows what exception this internal procedure raises...
    #pylint: disable=W0702
    except:
        out_ws_name = None
    host = args[0]
    if len(args) > 1:
        input_file = args[1]
        if len(args) > 2:
            output_directory = args[2]
        else:
            output_directory = None
    else:
        input_file = None
        output_directory = None
    # add the input file's folder to the data search directories if the file name contains one
    if input_file and isinstance(input_file, str):
        data_path = os.path.dirname(input_file)
        if len(data_path) > 0:
            try:
                config.appendDataSearchDir(str(data_path))
                args = list(args)  # tuples are immutable, so copy before replacing the file name
                args[1] = os.path.basename(input_file)
            #pylint: disable=bare-except
            except:  # if mantid is not available, this should ignore config
                pass
    if output_directory:
        config['defaultsave.directory'] = str(output_directory)

    #pylint: disable=protected-access
    if host._run_from_web:
        #pylint: disable=protected-access
        web_vars = host._wvs.get_all_vars()
        host.reducer.prop_man.set_input_parameters(**web_vars)
    else:
        pass  # the variables should already have been set up

    custom_print_function = host.set_custom_output_filename()
    if custom_print_function is not None:
        PropertyManager.save_file_name.set_custom_print(custom_print_function)
    #
    rez = reduce(*args)

    # prohibit returning a workspace to web services.
    #pylint: disable=protected-access
    if host._run_from_web and not isinstance(rez, str):
        rez = ""
    else:
        if isinstance(rez, list):
            # multirep run, just return it as it is
            return rez
        if rez is not None and out_ws_name and rez.name() != out_ws_name:
            # the function does not return None, pylint is wrong
            #pylint: disable=W1111
            rez = RenameWorkspace(InputWorkspace=rez, OutputWorkspace=out_ws_name)

    return rez
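# ---------------------------------------------------------------------------------------
# Hypothetical invocation of iliad_wrapper: MyReduction is a stand-in name for a concrete
# ReductionWrapper-style class and the paths are placeholders. Note that when
# host._run_from_web is True, the returned workspace is replaced by an empty string so
# nothing heavyweight is handed back to the web service.
# ---------------------------------------------------------------------------------------
# host = MyReduction()
# red_ws = iliad_wrapper(host, '/archive/cycle_19_1/INST00012345.nxs', '/home/user/results')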
def run_reduction(self):
    """ Reduces runs one by one, or sums them all together and reduces the sum.
        If wait_for_file time is > 0, it will wait until missing files appear on
        the data search path.
    """
    try:
        n, r = funcreturns.lhs_info('both')
        out_ws_name = r[0]
    except:
        out_ws_name = None

    # if this is not None, we want to run validation, not reduction
    if self.validate_run_number:
        self.reducer.prop_man.log(
            "**************************************************************************************", 'warning')
        self.reducer.prop_man.log(
            "**************************************************************************************", 'warning')
        rez, mess = self.build_or_validate_result()
        if rez:
            self.reducer.prop_man.log("*** SUCCESS! {0}".format(mess))
            self.reducer.prop_man.log(
                "**************************************************************************************", 'warning')
        else:
            self.reducer.prop_man.log("*** VALIDATION FAILED! {0}".format(mess))
            self.reducer.prop_man.log(
                "**************************************************************************************", 'warning')
            raise RuntimeError("Validation against old data file failed")
        self.validate_run_number = None
        return rez, mess

    if self.reducer.sum_runs:
# --------### sum runs provided ------------------------------------###
        if out_ws_name is None:
            self.sum_and_reduce()
            return None
        else:
            red_ws = self.sum_and_reduce()
            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=out_ws_name)
            return mtd[out_ws_name]
    else:
# --------### reduce list of runs one by one ----------------------------###
        runfiles = PropertyManager.sample_run.get_run_file_list()
        if out_ws_name is None:
            for file_name in runfiles:
                self.reduce(file_name)
            return None
        else:
            results = []
            nruns = len(runfiles)
            for num, file_name in enumerate(runfiles):
                red_ws = self.reduce(file_name)
                if isinstance(red_ws, list):
                    for ws in red_ws:
                        results.append(ws)
                else:
                    if nruns == 1:
                        if red_ws.name() != out_ws_name:
                            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=out_ws_name)
                        results.append(mtd[out_ws_name])
                    else:
                        OutWSName = '{0}#{1}of{2}'.format(out_ws_name, num + 1, nruns)
                        # compare against the per-run target name; the original compared
                        # against out_ws_name, which could skip a required rename
                        if red_ws.name() != OutWSName:
                            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=OutWSName)
                        results.append(mtd[OutWSName])
            #end
            if len(results) == 1:
                return results[0]
            else:
                return results
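# ---------------------------------------------------------------------------------------
# Sketch of driving the validation branch above; `rd` stands in for an instance of the
# class defining run_reduction, and the run number is a placeholder. Setting
# validate_run_number switches run_reduction into validation mode, which returns
# (success, message) and raises RuntimeError on failure.
# ---------------------------------------------------------------------------------------
# rd.validate_run_number = 12345
# ok, message = rd.run_reduction()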
def run_reduction(self):
    """ Reduces runs one by one, or sums them all together and reduces the sum.
        If wait_for_file time is > 0, it will wait until missing files appear on
        the data search path.
    """
    try:
        _, r = funcreturns.lhs_info('both')
        out_ws_name = r[0]
    # no exception type(s) specified. Who knows what exception this internal procedure raises...
    #pylint: disable=W0702
    except:
        out_ws_name = None

    # if this is not None, we want to run validation, not reduction
    if self.validate_run_number:
        self.reducer.prop_man.log(
            "**************************************************************************************", 'warning')
        self.reducer.prop_man.log(
            "**************************************************************************************", 'warning')
        rez, mess = self.build_or_validate_result()
        if rez:
            self.reducer.prop_man.log("*** SUCCESS! {0}".format(mess))
            self.reducer.prop_man.log(
                "**************************************************************************************", 'warning')
        else:
            self.reducer.prop_man.log(
                "*** VALIDATION FAILED! {0}".format(mess))
            self.reducer.prop_man.log(
                "**************************************************************************************", 'warning')
            raise RuntimeError("Validation against old data file failed")
        self.validate_run_number = None
        return rez, mess

    if self.reducer.sum_runs:
# --------### sum runs provided ------------------------------------###
        if out_ws_name is None:
            self.sum_and_reduce()
            return None
        else:
            red_ws = self.sum_and_reduce()
            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=out_ws_name)
            return mtd[out_ws_name]
    else:
# --------### reduce list of runs one by one ----------------------------###
        runfiles = PropertyManager.sample_run.get_run_file_list()
        if out_ws_name is None:
            for file_name in runfiles:
                self.reduce(file_name)
            return None
        else:
            results = []
            nruns = len(runfiles)
            for num, file_name in enumerate(runfiles):
                red_ws = self.reduce(file_name)
                if isinstance(red_ws, list):
                    for ws in red_ws:
                        results.append(ws)
                else:
                    if nruns == 1:
                        if red_ws.name() != out_ws_name:
                            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=out_ws_name)
                        results.append(mtd[out_ws_name])
                    else:
                        OutWSName = '{0}#{1}of{2}'.format(out_ws_name, num + 1, nruns)
                        # compare against the per-run target name; the original compared
                        # against out_ws_name, which could skip a required rename
                        if red_ws.name() != OutWSName:
                            RenameWorkspace(InputWorkspace=red_ws, OutputWorkspace=OutWSName)
                        results.append(mtd[OutWSName])
            #end
            if len(results) == 1:
                return results[0]
            else:
                return results
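# ---------------------------------------------------------------------------------------
# The per-run naming convention used above, shown standalone: for three run files and a
# left-hand-side name 'SR_spe', the individual output workspaces are named as follows.
# ---------------------------------------------------------------------------------------
out_ws_name, nruns = 'SR_spe', 3
names = ['{0}#{1}of{2}'.format(out_ws_name, num + 1, nruns) for num in range(nruns)]
# -> ['SR_spe#1of3', 'SR_spe#2of3', 'SR_spe#3of3']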