def calc_average(self, data_ws):
    """
    Compute the average Y value of a workspace.

    The average is computed by collapsing the workspace to a single bin per
    spectrum, then masking out detectors flagged by the
    FindDetectorsOutsideLimits and MedianDetectorTest algorithms. The average
    is then computed from the remaining detectors, using their errors as
    weights, i.e.

        average = sum(Yvalue[i]*weight[i]) / sum(weights)

    where only unmasked detectors are used and weight[i] = 1/errorValue[i].
    """
    e_low = self.monovan_integr_range[0]
    e_upp = self.monovan_integr_range[1]
    if e_low > e_upp:
        raise ValueError("Inconsistent mono-vanadium integration range defined!")

    # Collapse to a single bin per spectrum over the integration range
    data_ws = Rebin(InputWorkspace=data_ws, OutputWorkspace=data_ws,
                    Params=[e_low, 2. * (e_upp - e_low), e_upp])
    data_ws = ConvertToMatrixWorkspace(InputWorkspace=data_ws, OutputWorkspace=data_ws)

    # Run the standard diagnostics to flag bad detectors
    args = {}
    args['tiny'] = self.diag_tiny
    args['huge'] = self.diag_huge
    args['van_out_lo'] = self.monovan_lo_bound
    args['van_out_hi'] = self.monovan_hi_bound
    args['van_lo'] = self.monovan_lo_frac
    args['van_hi'] = self.monovan_hi_frac
    args['van_sig'] = self.diag_samp_sig
    diagnostics.diagnose(data_ws, **args)

    # Apply the resulting mask and drop the temporary mask workspace
    monovan_masks, det_ids = ExtractMask(InputWorkspace=data_ws,
                                         OutputWorkspace='monovan_masks')
    MaskDetectors(Workspace=data_ws, MaskedWorkspace=monovan_masks)
    DeleteWorkspace(Workspace=monovan_masks)
    ConvertFromDistribution(Workspace=data_ws)

    # Error-weighted average over the unmasked detectors
    nhist = data_ws.getNumberHistograms()
    average_value = 0.0
    weight_sum = 0.0
    for i in range(nhist):
        try:
            det = data_ws.getDetector(i)
        except Exception:
            # Spectrum has no detector attached; skip it
            continue
        if det.isMasked():
            continue
        y_value = data_ws.readY(i)[0]
        if y_value != y_value:
            # NaN check: NaN != NaN
            continue
        weight = 1.0 / data_ws.readE(i)[0]
        average_value += y_value * weight
        weight_sum += weight

    return average_value / weight_sum
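# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the reduction code): the error-weighted
# average used in calc_average, written as a standalone function. The names
# y_values, errors and masked are hypothetical stand-ins for the first-bin
# Y and E arrays read from the workspace and the set of masked spectra.
# ---------------------------------------------------------------------------
import math


def weighted_average_sketch(y_values, errors, masked=None):
    """Minimal sketch: average = sum(Y[i]*w[i]) / sum(w[i]) with w[i] = 1/E[i],
    skipping masked and NaN entries, as calc_average does. Zero errors are
    also skipped here purely to keep the sketch safe against division by zero."""
    masked = masked or set()
    average_value = 0.0
    weight_sum = 0.0
    for i, (y, e) in enumerate(zip(y_values, errors)):
        if i in masked or math.isnan(y) or e == 0.0:
            continue
        weight = 1.0 / e
        average_value += y * weight
        weight_sum += weight
    return average_value / weight_sum


# Example: spectrum 2 is masked, so only the first two values contribute, and
# the lower-error value (index 0) carries twice the weight of index 1:
# weighted_average_sketch([10.0, 12.0, 50.0], [1.0, 2.0, 1.0], masked={2}) -> 10.666...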
def diagnose(self, white_run, sample_run=None, other_white=None, remove_zero=None,
             tiny=None, large=None, median_lo=None, median_hi=None, signif=None,
             bkgd_threshold=None, bkgd_range=None, variation=None,
             bleed_test=False, bleed_maxrate=None, bleed_pixels=None,
             hard_mask=None, print_results=False):
    """
    A pass-through method to the 'real' one in diagnostics.py.

    Run diagnostics on the provided run and white beam files. There are 4
    possible tests, depending on the input given:
      - White beam diagnosis
      - Background tests
      - Second white beam
      - PSD bleed test

    Required inputs:
      white_run  - The run number or filepath of the white beam run

    Optional inputs:
      sample_run - The run number or filepath of the sample run for the
                   background test (default = None)
      other_white - If provided, an additional set of tests is performed on
                    this file (default = None)
      remove_zero - If true then zeroes in the data will count as failed
                    (default = False)
      tiny        - Minimum threshold for acceptance (default = 1e-10)
      large       - Maximum threshold for acceptance (default = 1e10)
      median_lo   - Fraction of median to consider counting low (default = 0.1)
      median_hi   - Fraction of median to consider counting high (default = 3.0)
      signif      - Counts within this number of multiples of the standard
                    deviation will be kept (default = 3.3)
      bkgd_threshold - High threshold for background removal in multiples of
                       median (default = 5.0)
      bkgd_range  - The background range as a list of 2 numbers: [min, max].
                    If not present then they are taken from the parameter
                    file (default = None)
      variation   - The number of medians the ratio of the first/second white
                    beam can deviate from the average by (default = 1.1)
      bleed_test  - If true then the CreatePSDBleedMask algorithm is run
      bleed_maxrate - If the bleed test is on then this is the maximum
                      framerate allowed in a tube
      bleed_pixels  - If the bleed test is on then this is the number of
                      pixels ignored within the bleed test diagnostic
      hard_mask   - A file specifying those spectra that should be masked
                    without testing
      print_results - If True then the results are printed to std out
      inst_name   - The name of the instrument to perform the diagnosis
                    (this wrapper passes self.instr_name automatically)
    """
    lhs_names = lhs_info('names')
    if len(lhs_names) > 0:
        var_name = lhs_names[0]
    else:
        var_name = None

    __diagnostic_mask = diagnostics.diagnose(white_run, sample_run, other_white,
                                             remove_zero, tiny, large,
                                             median_lo, median_hi, signif,
                                             bkgd_threshold, bkgd_range, variation,
                                             bleed_test, bleed_maxrate, bleed_pixels,
                                             hard_mask, print_results, self.instr_name)
    if var_name is not None:
        result = RenameWorkspace(str(__diagnostic_mask), var_name).workspace()
    else:
        result = __diagnostic_mask

    self.spectra_masks = result
    return result
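# ---------------------------------------------------------------------------
# Hedged usage sketch for the pass-through diagnose() above. The run numbers
# and the `reducer` object are hypothetical; any configured instance exposing
# this method (and an instr_name attribute) would do.
# ---------------------------------------------------------------------------
def _example_diagnose_call(reducer):
    # The left-hand-side variable name ("diag_mask") is picked up via
    # lhs_info('names') inside diagnose() and used to name the mask workspace.
    diag_mask = reducer.diagnose(white_run=11001,    # hypothetical white beam run
                                 sample_run=11015,   # hypothetical sample run
                                 tiny=1e-10, large=1e10,
                                 median_lo=0.1, median_hi=3.0,
                                 signif=3.3,
                                 bkgd_threshold=5.0,
                                 print_results=True)
    # The same mask is also cached on the reducer for later reduction steps.
    assert reducer.spectra_masks is diag_mask
    return diag_mask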
def diagnose(self, white, **kwargs):
    """
    Run diagnostics on the provided workspaces.

    This method does some additional processing before moving on to the
    diagnostics:
      1) Computes the white beam integrals, converting to energy
      2) Computes the background integral using the instrument-defined range
      3) Computes a total count from the sample

    These inputs are passed to the diagnostics functions.

    Required inputs:
      white  - A workspace, run number or filepath of a white beam run. A
               workspace is assumed to have simply been loaded and nothing
               else.

    Optional inputs:
      sample       - A workspace, run number or filepath of a sample run. A
                     workspace is assumed to have simply been loaded and
                     nothing else. (default = None)
      second_white - If provided, an additional set of tests is performed on
                     this. (default = None)
      hard_mask    - A file specifying those spectra that should be masked
                     without testing (default = None)
      tiny         - Minimum threshold for acceptance (default = 1e-10)
      huge         - Maximum threshold for acceptance (default = 1e10)
      bkgd_range   - A list of two numbers indicating the background range
                     (default = instrument defaults)
      van_out_lo   - Lower bound defining outliers as fraction of median
                     value (default = 0.01)
      van_out_hi   - Upper bound defining outliers as fraction of median
                     value (default = 100.)
      van_lo       - Fraction of median to consider counting low for the
                     white beam diag (default = 0.1)
      van_hi       - Fraction of median to consider counting high for the
                     white beam diag (default = 1.5)
      van_sig      - Error criterion as a multiple of error bar, i.e. to fail
                     the test the magnitude of the difference with respect to
                     the median value must also exceed this number of error
                     bars (default = 0.0)
      samp_zero    - If true then zeroes in the vanadium data will count as
                     failed (default = True)
      samp_lo      - Fraction of median to consider counting low for the
                     sample diag (default = 0)
      samp_hi      - Fraction of median to consider counting high for the
                     sample diag (default = 2.0)
      samp_sig     - Error criterion as a multiple of error bar, i.e. to fail
                     the test the magnitude of the difference with respect to
                     the median value must also exceed this number of error
                     bars (default = 3.3)
      variation    - The number of medians the ratio of the first/second
                     white beam can deviate from the average by
                     (default = 1.1)
      bleed_test   - If true then the CreatePSDBleedMask algorithm is run
      bleed_maxrate - If the bleed test is on then this is the maximum
                      framerate allowed in a tube
      bleed_pixels - If the bleed test is on then this is the number of
                     pixels ignored within the bleed test diagnostic
      print_results - If True then the results are printed to the screen
    """
    lhs_names = funcreturns.lhs_info('names')
    if len(lhs_names) > 0:
        var_name = lhs_names[0]
    else:
        var_name = "diag_mask"

    # Check for any keywords that have not been supplied and put in the defaults
    for par in self.diag_params:
        arg = par.lstrip('diag_')
        if arg not in kwargs:
            kwargs[arg] = getattr(self, par)

    # Get the white beam vanadium integrals
    whiteintegrals = self.do_white(white, None, None, None)  # No grouping yet
    if 'second_white' in kwargs:
        second_white = kwargs['second_white']
        if second_white is None:
            del kwargs['second_white']
        else:
            other_whiteintegrals = self.do_white(second_white, None, None, None)  # No grouping yet
            kwargs['second_white'] = other_whiteintegrals

    # Get the background/total counts from the sample if present
    if 'sample' in kwargs:
        sample = kwargs['sample']
        del kwargs['sample']
        # If the bleed test is requested then we need to pass in the sample_run as well
        if kwargs.get('bleed_test', False):
            kwargs['sample_run'] = sample

        # Set up the background integrals
        result_ws = common.load_runs(sample)
        result_ws = self.normalise(result_ws, result_ws.name(), self.normalise_method)
        if 'bkgd_range' in kwargs:
            bkgd_range = kwargs['bkgd_range']
            del kwargs['bkgd_range']
        else:
            bkgd_range = self.background_range
        background_int = Integration(result_ws,
                                     RangeLower=bkgd_range[0], RangeUpper=bkgd_range[1],
                                     IncludePartialBins=True)
        total_counts = Integration(result_ws, IncludePartialBins=True)
        background_int = ConvertUnits(background_int, "Energy", AlignBins=0)
        background_int *= 1.7016e8
        diagnostics.normalise_background(background_int, whiteintegrals,
                                         kwargs.get('second_white', None))
        kwargs['background_int'] = background_int
        kwargs['sample_counts'] = total_counts

    # If we have a hard_mask, check the instrument name is defined
    if 'hard_mask' in kwargs:
        if 'instrument_name' not in kwargs:
            kwargs['instrument_name'] = self.instr_name

    # Check how we should run diag
    if self.diag_spectra is None:
        # Do the whole lot at once
        diagnostics.diagnose(whiteintegrals, **kwargs)
    else:
        banks = self.diag_spectra.split(";")
        bank_spectra = []
        for b in banks:
            token = b.split(",")  # b = "(start,end)"
            if len(token) != 2:
                raise ValueError("Invalid bank spectra specification in diag %s" % self.diag_spectra)
            start = int(token[0].lstrip('('))
            end = int(token[1].rstrip(')'))
            bank_spectra.append((start, end))

        for index, bank in enumerate(bank_spectra):
            kwargs['start_index'] = bank[0] - 1
            kwargs['end_index'] = bank[1] - 1
            diagnostics.diagnose(whiteintegrals, **kwargs)

    if 'sample_counts' in kwargs:
        DeleteWorkspace(Workspace='background_int')
        DeleteWorkspace(Workspace='total_counts')
    if 'second_white' in kwargs:
        DeleteWorkspace(Workspace=kwargs['second_white'])

    # Return a mask workspace
    diag_mask, det_ids = ExtractMask(InputWorkspace=whiteintegrals, OutputWorkspace=var_name)
    DeleteWorkspace(Workspace=whiteintegrals)
    self.spectra_masks = diag_mask
    return diag_mask
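# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the reduction code): how the diag_spectra
# string is interpreted by diagnose() above. The example string is
# hypothetical; the expected format is ";"-separated "(start,end)" spectrum
# ranges, one per bank.
# ---------------------------------------------------------------------------
def parse_diag_spectra_sketch(diag_spectra):
    """Mirror of the bank parsing in diagnose(): returns 0-based
    (start_index, end_index) pairs, one per bank."""
    bank_spectra = []
    for b in diag_spectra.split(";"):
        token = b.split(",")
        if len(token) != 2:
            raise ValueError("Invalid bank spectra specification in diag %s" % diag_spectra)
        start = int(token[0].lstrip('('))
        end = int(token[1].rstrip(')'))
        # Spectrum numbers are 1-based; workspace indices are 0-based.
        bank_spectra.append((start - 1, end - 1))
    return bank_spectra


# parse_diag_spectra_sketch("(1,100);(101,200)") -> [(0, 99), (100, 199)]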