def _two_factor_corrections_approximation(self, sample_workspace, container_workspace, factor_workspaces):
    acc = factor_workspaces['acc']
    ass = factor_workspaces['ass']
    minuend = Divide(sample_workspace, ass, StoreInADS=False)
    subtrahend = Divide(container_workspace, acc, StoreInADS=False)
    difference = Minus(minuend, subtrahend, OutputWorkspace="__difference")
    return difference

def _divideByDirect(self, ws):
    """Divide ws by the direct beam."""
    ws = self._rebinToDirect(ws)
    directWS = self.getProperty(Prop.DIRECT_FOREGROUND_WS).value
    reflectivityWSName = self._names.withSuffix('reflectivity')
    reflectivityWS = Divide(LHSWorkspace=ws,
                            RHSWorkspace=directWS,
                            OutputWorkspace=reflectivityWSName,
                            EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(ws)
    reflectivityWS = common.correctForChopperOpenings(reflectivityWS, directWS, self._names,
                                                      self._cleanup, self._subalgLogging)
    reflectivityWS.setYUnit('Reflectivity')
    reflectivityWS.setYUnitLabel('Reflectivity')
    return reflectivityWS

def _monitor_normalization(self, w, target):
    """
    Divide data by integrated monitor intensity

    Parameters
    ----------
    w: Mantid.EventsWorkspace
        Input workspace
    target: str
        Specify the entity the workspace refers to. Valid options are
        'sample', 'background', and 'vanadium'

    Returns
    -------
    Mantid.EventWorkspace
    """
    _t_mon = self._load_monitors(target)
    _t_mon = ConvertUnits(_t_mon, Target='Wavelength', Emode='Elastic')
    _t_mon = CropWorkspace(_t_mon, XMin=self._wavelength_band[0],
                           XMax=self._wavelength_band[1])
    _t_mon = OneMinusExponentialCor(_t_mon, C='0.20749999999999999',
                                    C1='0.001276')
    _t_mon = Scale(_t_mon, Factor='1e-06', Operation='Multiply')
    _t_mon = Integration(_t_mon)  # total monitor count
    _t_w = Divide(w, _t_mon, OutputWorkspace=w.name())
    return _t_w

def _normalizeToTime(ws, wsNames, wsCleanup, algorithmLogging):
    """Normalize to the 'duration' sample log, falling back to 'actual_time'."""
    log = ws.run()
    if not log.hasProperty('duration'):
        if not log.hasProperty('actual_time'):
            raise RuntimeError(
                "Cannot normalise to acquisition time: 'duration' missing from sample logs."
            )
        time = log.getProperty('actual_time').value
    else:
        time = log.getProperty('duration').value
    if time == 0:
        raise RuntimeError("Cannot normalise to acquisition time: time is zero.")
    if time < 0:
        raise RuntimeError("Cannot normalise to acquisition time: time is negative.")
    normalizedWSName = wsNames.withSuffix('normalized_to_time')
    normalizationFactorWsName = wsNames.withSuffix('normalization_factor_time')
    normalizationFactorWS = CreateSingleValuedWorkspace(
        OutputWorkspace=normalizationFactorWsName,
        DataValue=time,
        EnableLogging=algorithmLogging)
    normalizedWS = Divide(LHSWorkspace=ws,
                          RHSWorkspace=normalizationFactorWS,
                          OutputWorkspace=normalizedWSName,
                          EnableLogging=algorithmLogging)
    wsCleanup.cleanup(normalizationFactorWS)
    return normalizedWS

def runTest(self):
    # load data for bank 1 (incl. monitor spectra 1-5)
    van = load_data_and_normalise('WISH/input/11_4/WISH00019612.raw', outputWorkspace="van")
    # create Abs Correction for V
    shape = '''<sphere id="V-sphere">
        <centre x="0.0" y="0.0" z="0.0" />
        <radius val="0.0025"/>
        </sphere>'''
    CreateSampleShape(InputWorkspace=van, ShapeXML=shape)
    SetSampleMaterial(InputWorkspace=van, SampleNumberDensity=0.0119,
                      ScatteringXSection=5.197, AttenuationXSection=4.739,
                      ChemicalFormula='V0.95 Nb0.05')
    abs_cor = AbsorptionCorrection(InputWorkspace=van, ElementSize=0.5)
    # correct Vanadium run for absorption
    van = Divide(LHSWorkspace=van, RHSWorkspace=abs_cor, OutputWorkspace=van)
    # smooth data
    SmoothNeighbours(InputWorkspace=van, OutputWorkspace=van, Radius=3,
                     NumberOfNeighbours=6)
    SmoothData(InputWorkspace=van, OutputWorkspace=van, NPoints=300)

def runTest(self):
    # Load processed vanadium for normalisation (bank 1)
    van = LoadNexus(Filename="WISH19612_vana_bank1_SXProcessed.nxs")
    # Load raw data (bank 1)
    ws = load_data_and_normalise("WISH00038237.raw")  # default output name so it does not overwrite van
    # normalise to vanadium
    RebinToWorkspace(WorkspaceToRebin=van, WorkspaceToMatch=ws, OutputWorkspace=van)
    Divide(LHSWorkspace=ws, RHSWorkspace=van, OutputWorkspace=ws)
    ReplaceSpecialValues(InputWorkspace=ws, OutputWorkspace=ws, NaNValue=0,
                         InfinityValue=0, BigNumberThreshold=1e15,
                         SmallNumberThreshold=-1e15)
    # Convert to Diffraction MD and Lorentz Correction
    wsMD = ConvertToDiffractionMDWorkspace(InputWorkspace=ws, LorentzCorrection=True,
                                           OneEventPerBin=False)
    # BinMD to 2D object and convert to histo so can compare saved workspace
    wsMD_2Dcut = BinMD(InputWorkspace=wsMD, AxisAligned=False,
                       BasisVector0='Q_lab_x,Angstrom^-1,1.0,0.0,0.0',
                       BasisVector1='Q_lab_y,Angstrom^-1,0.0,1.0,0.0',
                       BasisVector2='Q_lab_z,Angstrom^-1,0.0,0.0,1.0',
                       OutputExtents='0.2,0.8,-0.4,0.4,0.05,0.1',
                       OutputBins='50,50,1')
    ConvertMDHistoToMatrixWorkspace(InputWorkspace=wsMD_2Dcut, OutputWorkspace="wsHisto_2Dcut")

def _waterCalibration(self, ws):
    """Divide ws by a (water) reference workspace."""
    if self.getProperty(Prop.WATER_REFERENCE).isDefault:
        return ws
    waterWS = self.getProperty(Prop.WATER_REFERENCE).value
    detWSName = self._names.withSuffix('water_detectors')
    waterWS = ExtractMonitors(InputWorkspace=waterWS,
                              DetectorWorkspace=detWSName,
                              EnableLogging=self._subalgLogging)
    if not mtd.doesExist(detWSName):
        raise RuntimeError('No detectors in the water reference data.')
    if waterWS.getNumberHistograms() != ws.getNumberHistograms():
        self.log().error('Water workspace and run do not have the same number of histograms.')
    rebinnedWaterWSName = self._names.withSuffix('water_rebinned')
    rebinnedWaterWS = RebinToWorkspace(WorkspaceToRebin=waterWS,
                                       WorkspaceToMatch=ws,
                                       OutputWorkspace=rebinnedWaterWSName,
                                       EnableLogging=self._subalgLogging)
    calibratedWSName = self._names.withSuffix('water_calibrated')
    calibratedWS = Divide(LHSWorkspace=ws,
                          RHSWorkspace=rebinnedWaterWS,
                          OutputWorkspace=calibratedWSName,
                          EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(waterWS)
    self._cleanup.cleanup(rebinnedWaterWS)
    self._cleanup.cleanup(ws)
    return calibratedWS

def reduceToPowder(ws, OutputWorkspace, norm=None, target='Theta', XMin=10, XMax=135, NumberBins=2500):
    # Add scale by monitor
    ConvertSpectrumAxis(InputWorkspace=ws, Target=target, OutputWorkspace=OutputWorkspace)
    Transpose(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace)
    ResampleX(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace,
              XMin=XMin, XMax=XMax, NumberBins=NumberBins)

    if norm is not None:
        ConvertSpectrumAxis(InputWorkspace=norm, Target=target, OutputWorkspace='__norm')
        Transpose(InputWorkspace='__norm', OutputWorkspace='__norm')
        ResampleX(InputWorkspace='__norm', OutputWorkspace='__norm',
                  XMin=XMin, XMax=XMax, NumberBins=NumberBins)
        Divide(LHSWorkspace=OutputWorkspace, RHSWorkspace='__norm', OutputWorkspace=OutputWorkspace)
        DeleteWorkspace('__norm')

    return OutputWorkspace

def performOperation(self):
    lhs_valid, rhs_valid, err_msg = self.validateInputs()
    if err_msg != str():
        return lhs_valid, rhs_valid, err_msg
    lhs_ws, rhs_ws = self._scale_input_workspaces()

    try:
        if self._operation == '+':
            if self._md_lhs or self._md_rhs:
                PlusMD(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
            else:
                Plus(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
        elif self._operation == '-':
            if self._md_lhs or self._md_rhs:
                MinusMD(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
            else:
                Minus(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
        elif self._operation == '*':
            if self._md_lhs or self._md_rhs:
                MultiplyMD(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
            else:
                Multiply(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
        elif self._operation == 'WM':
            if self._md_lhs or self._md_rhs:
                WeightedMeanMD(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
            else:
                WeightedMean(InputWorkspace1=lhs_ws, InputWorkspace2=rhs_ws, OutputWorkspace=self._output_ws)
        else:
            if self._md_lhs or self._md_rhs:
                DivideMD(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
            else:
                Divide(LHSWorkspace=lhs_ws, RHSWorkspace=rhs_ws, OutputWorkspace=self._output_ws)
    except (RuntimeError, ValueError) as err:
        return False, False, str(err)
    else:
        self._regularize_output_names(self._output_ws)
    finally:
        DeleteWorkspaces(WorkspaceList=[lhs_ws, rhs_ws])
    return True, True, ""

def _correct_sample(self, sample_workspace, a_ss_workspace):
    """
    Correct for sample only (when no container is given).
    """
    logger.information('Correcting sample')
    correction_in_lambda = self._convert_units_wavelength(a_ss_workspace)
    corrected = Divide(LHSWorkspace=sample_workspace, RHSWorkspace=correction_in_lambda)
    return corrected

def correctSampleData(self, sampleWsName, useVana, vanaWsName, useEmpty, emptyWsName):
    if useEmpty:
        Minus(LHSWorkspace=sampleWsName, RHSWorkspace=emptyWsName, OutputWorkspace=sampleWsName)
    if useVana:
        Divide(LHSWorkspace=sampleWsName, RHSWorkspace=vanaWsName, OutputWorkspace=sampleWsName)

def _normalise_by_integral(workspace):
    integrated = Integration(InputWorkspace=workspace,
                             OutputWorkspace="__integral",
                             StoreInADS=False,
                             EnableLogging=False)
    return Divide(LHSWorkspace=workspace,
                  RHSWorkspace=integrated,
                  OutputWorkspace="__divided",
                  StoreInADS=False,
                  EnableLogging=False)

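# --- Hypothetical usage sketch (not part of the original source) ---
# Assumes Mantid's simpleapi is importable; 'some_run.nxs' is a placeholder file name.
from mantid.simpleapi import Load

raw_ws = Load(Filename='some_run.nxs')          # placeholder run file
normalised_ws = _normalise_by_integral(raw_ws)  # each spectrum divided by its own integral
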
def _three_factor_corrections_approximation(self, sample_workspace, container_workspace, factor_workspaces):
    acc = factor_workspaces['acc']
    acsc = factor_workspaces['acsc']
    assc = factor_workspaces['assc']
    subtrahend = Multiply(container_workspace, (acsc / acc), StoreInADS=False)
    difference = Minus(sample_workspace, subtrahend, StoreInADS=False)
    quotient = Divide(difference, assc, OutputWorkspace="__quotient")
    return quotient

def _normalize_by_vanadium(diff_ws, van_ws, diff_ws_name):
    """
    Normalize by vanadium
    :param diff_ws: diffraction workspace to normalize
    :param van_ws: vanadium workspace to normalize by
    :param diff_ws_name: name of the normalized output workspace
    :return: normalized workspace retrieved from the ADS
    """
    Divide(LHSWorkspace=diff_ws, RHSWorkspace=van_ws, OutputWorkspace=diff_ws_name)
    diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)
    return diff_ws

def _applyCorrections(self, mainWS):
    """Applies self shielding corrections to a workspace, if corrections exist."""
    if self.getProperty(common.PROP_SELF_SHIELDING_CORRECTION_WS).isDefault:
        return mainWS, False
    correctionWS = self.getProperty(common.PROP_SELF_SHIELDING_CORRECTION_WS).value
    correctedWSName = self._names.withSuffix('self_shielding_corrected')
    correctedWS = Divide(LHSWorkspace=mainWS,
                         RHSWorkspace=correctionWS,
                         OutputWorkspace=correctedWSName,
                         EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(mainWS)
    return correctedWS, True

def _divideByDirect(self, ws, directWS):
    """Divide ws by the direct beam."""
    reflectivityWSName = self._names.withSuffix('reflectivity')
    reflectivityWS = Divide(LHSWorkspace=ws,
                            RHSWorkspace=directWS,
                            OutputWorkspace=reflectivityWSName,
                            EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(directWS)
    reflectivityWS.setYUnit('Reflectivity')
    reflectivityWS.setYUnitLabel('Reflectivity')
    # The X error data is lost in Divide.
    reflectivityWS.setDx(0, ws.readDx(0))
    self._cleanup.cleanup(ws)
    return reflectivityWS

def _normalizeToVana(self, mainWS):
    """Normalize to vanadium workspace."""
    if self.getProperty(common.PROP_VANA_WS).isDefault:
        return mainWS
    vanaWS = self.getProperty(common.PROP_VANA_WS).value
    vanaNormalizedWSName = self._names.withSuffix('vanadium_normalized')
    vanaNormalizedWS = Divide(LHSWorkspace=mainWS,
                              RHSWorkspace=vanaWS,
                              OutputWorkspace=vanaNormalizedWSName,
                              EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(mainWS)
    if self.getProperty(common.PROP_ABSOLUTE_UNITS).value == common.ABSOLUTE_UNITS_ON:
        vanaNormalizedWS = _absoluteUnits(vanaNormalizedWS, vanaWS)
    return vanaNormalizedWS

def _sensitivity_correction(self, w):
    """
    Divide each pixel by the vanadium count

    Parameters
    ----------
    w: Events workspace in units of wavelength

    Returns
    -------
    Mantid.EventWorkspace
    """
    MaskDetectors(w, MaskedWorkspace=self._v_mask)
    _t_w = Divide(w, self._van, OutputWorkspace=w.name())
    return _t_w

def processVana(self, wsName):
    CylinderAbsorption(InputWorkspace=wsName,
                       OutputWorkspace='Atten',
                       AttenuationXSection=self._attenuationXSection,
                       ScatteringXSection=self._scatteringXSection,
                       SampleNumberDensity=self._sampleNumberDensity,
                       NumberOfWavelengthPoints=self._numberOfWavelengthPoints,
                       ExpMethod=self._expMethod,
                       EMode=self._eMode,
                       EFixed=self._eFixed,
                       CylinderSampleHeight=self._cylinderSampleHeight,
                       CylinderSampleRadius=self._cylinderSampleRadius,
                       NumberOfSlices=self._numberOfSlices,
                       NumberOfAnnuli=self._numberOfAnnuli)
    Divide(LHSWorkspace=wsName, RHSWorkspace='Atten', OutputWorkspace=wsName)

def __processFile(self, filename, wkspname, file_prog_start, determineCharacterizations):
    chunks = determineChunking(filename, self.chunkSize)
    self.log().information('Processing \'%s\' in %d chunks' % (filename, len(chunks)))
    prog_per_chunk_step = self.prog_per_file * 1. / (6. * float(len(chunks)))  # for better progress reporting - 6 steps per chunk

    # inner loop is over chunks
    for (j, chunk) in enumerate(chunks):
        prog_start = file_prog_start + float(j) * 5. * prog_per_chunk_step
        chunkname = "%s_c%d" % (wkspname, j)
        Load(Filename=filename, OutputWorkspace=chunkname,
             startProgress=prog_start, endProgress=prog_start + prog_per_chunk_step,
             **chunk)
        if determineCharacterizations:
            self.__determineCharacterizations(filename, chunkname, False)  # updates instance variable
            determineCharacterizations = False

        prog_start += prog_per_chunk_step
        if self.filterBadPulses > 0.:
            FilterBadPulses(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                            LowerCutoff=self.filterBadPulses,
                            startProgress=prog_start,
                            endProgress=prog_start + prog_per_chunk_step)
        prog_start += prog_per_chunk_step

        # absorption correction workspace
        if self.absorption is not None and len(str(self.absorption)) > 0:
            ConvertUnits(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                         Target='Wavelength', EMode='Elastic')
            Divide(LHSWorkspace=chunkname, RHSWorkspace=self.absorption, OutputWorkspace=chunkname,
                   startProgress=prog_start, endProgress=prog_start + prog_per_chunk_step)
            ConvertUnits(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                         Target='TOF', EMode='Elastic')
        prog_start += prog_per_chunk_step

        AlignAndFocusPowder(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                            startProgress=prog_start,
                            endProgress=prog_start + 2. * prog_per_chunk_step,
                            **self.kwargs)
        prog_start += 2. * prog_per_chunk_step  # AlignAndFocusPowder counts for two steps

        if j == 0:
            self.__updateAlignAndFocusArgs(chunkname)
            RenameWorkspace(InputWorkspace=chunkname, OutputWorkspace=wkspname)
        else:
            Plus(LHSWorkspace=wkspname, RHSWorkspace=chunkname, OutputWorkspace=wkspname,
                 ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                 startProgress=prog_start, endProgress=prog_start + prog_per_chunk_step)
            DeleteWorkspace(Workspace=chunkname)
            if self.kwargs['PreserveEvents']:
                CompressEvents(InputWorkspace=wkspname, OutputWorkspace=wkspname)

def reduceToPowder(ws, OutputWorkspace, cal=None, target='Theta', XMin=10, XMax=135, NumberBins=2500, normaliseBy='Monitor'):
    ConvertSpectrumAxis(InputWorkspace=ws, Target=target, OutputWorkspace=OutputWorkspace)
    Transpose(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace)
    ResampleX(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace,
              XMin=XMin, XMax=XMax, NumberBins=NumberBins)

    if cal is not None:
        CopyInstrumentParameters(ws, cal)
        ConvertSpectrumAxis(InputWorkspace=cal, Target=target, OutputWorkspace='__cal')
        Transpose(InputWorkspace='__cal', OutputWorkspace='__cal')
        ResampleX(InputWorkspace='__cal', OutputWorkspace='__cal',
                  XMin=XMin, XMax=XMax, NumberBins=NumberBins)
        Divide(LHSWorkspace=OutputWorkspace, RHSWorkspace='__cal', OutputWorkspace=OutputWorkspace)
        DeleteWorkspace('__cal')

    if normaliseBy == "Monitor":
        ws_monitor = mtd[ws].run().getProtonCharge()
        cal_monitor = mtd[cal].run().getProtonCharge()
        Scale(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace,
              Factor=cal_monitor / ws_monitor)
    elif normaliseBy == "Time":
        ws_duration = mtd[ws].run().getLogData('duration').value
        cal_duration = mtd[cal].run().getLogData('duration').value
        Scale(InputWorkspace=OutputWorkspace, OutputWorkspace=OutputWorkspace,
              Factor=cal_duration / ws_duration)

    return OutputWorkspace

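# --- Hypothetical usage sketch (not part of the original source) ---
# reduceToPowder indexes mtd[...] by name when normalising, so 'data' and
# 'vanadium' below stand for workspace names already present in the ADS.
reduceToPowder('data', 'data_powder', cal='vanadium',
               XMin=10, XMax=135, NumberBins=2500, normaliseBy='Monitor')
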
def fold_chopped(workspace_name):
    """
    Folds multiple frames of a data set into one workspace.

    @param workspace_name Name of the group to fold
    """
    from mantid.simpleapi import (MergeRuns, DeleteWorkspace, CreateWorkspace, Divide)

    workspaces = mtd[workspace_name].getNames()
    merged_ws = workspace_name + '_merged'
    MergeRuns(InputWorkspaces=','.join(workspaces), OutputWorkspace=merged_ws)

    scaling_ws = '__scaling_ws'
    unit = mtd[workspace_name].getItem(0).getAxis(0).getUnit().unitID()

    ranges = []
    for ws in mtd[workspace_name].getNames():
        x_min = mtd[ws].dataX(0)[0]
        x_max = mtd[ws].dataX(0)[-1]
        ranges.append((x_min, x_max))
        DeleteWorkspace(Workspace=ws)

    data_x = mtd[merged_ws].readX(0)
    data_y = []
    data_e = []

    for i in range(0, mtd[merged_ws].blocksize()):
        y_val = 0.0
        for rng in ranges:
            if rng[0] <= data_x[i] <= rng[1]:
                y_val += 1.0
        data_y.append(y_val)
        data_e.append(0.0)

    CreateWorkspace(OutputWorkspace=scaling_ws, DataX=data_x, DataY=data_y,
                    DataE=data_e, UnitX=unit)

    Divide(LHSWorkspace=merged_ws, RHSWorkspace=scaling_ws, OutputWorkspace=workspace_name)

    DeleteWorkspace(Workspace=merged_ws)
    DeleteWorkspace(Workspace=scaling_ws)

def divide_workspace(dividend_workspace, divisor_workspace):
    """
    Divides the specified dividend workspace by the specified divisor workspace.
    Replaces Infinity and NaN values with 0.

    :param dividend_workspace: The workspace to be divided.
    :param divisor_workspace:  The workspace to divide by.
    :return:                   The dividend workspace / the divisor workspace.
    """
    dividend_ws, divisor_ws = rebin_to_smallest(dividend_workspace, divisor_workspace)
    divided_ws = Divide(LHSWorkspace=dividend_ws, RHSWorkspace=divisor_ws,
                        OutputWorkspace="divided",
                        StoreInADS=False, EnableLogging=False)
    return ReplaceSpecialValues(InputWorkspace=divided_ws, NaNValue=0.0, InfinityValue=0.0,
                                OutputWorkspace="removed_special",
                                StoreInADS=False, EnableLogging=False)

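# --- Hypothetical usage sketch (not part of the original source) ---
# Assumes two compatible Mantid workspaces; the file names are placeholders.
from mantid.simpleapi import Load

sample_ws = Load(Filename='sample_run.nxs')      # placeholder
vanadium_ws = Load(Filename='vanadium_run.nxs')  # placeholder
ratio_ws = divide_workspace(sample_ws, vanadium_ws)  # NaN/Inf in the ratio become 0
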
def _divideByDirect(self, ws, directWS):
    """Divide ws by the direct beam."""
    reflectivityWSName = self._names.withSuffix('reflectivity')
    reflectivityWS = Divide(LHSWorkspace=ws,
                            RHSWorkspace=directWS,
                            OutputWorkspace=reflectivityWSName,
                            EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(directWS)
    reflectivityWS.setYUnit('Reflectivity')
    reflectivityWS.setYUnitLabel('Reflectivity')
    # The X error data is lost in Divide.
    reflectivityWS.setDx(0, ws.readDx(0))
    self._cleanup.cleanup(ws)
    return reflectivityWS

def _divideByDirect(self, ws):
    """Divide ws by the direct beam."""
    ws = self._rebinToDirect(ws)
    directWS = self.getProperty(Prop.DIRECT_FOREGROUND_WS).value
    reflectivityWSName = self._names.withSuffix('reflectivity')
    reflectivityWS = Divide(LHSWorkspace=ws,
                            RHSWorkspace=directWS,
                            OutputWorkspace=reflectivityWSName,
                            EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(ws)
    reflectivityWS.setYUnit('Reflectivity')
    reflectivityWS.setYUnitLabel('Reflectivity')
    return reflectivityWS

def runTest(self):
    try:
        BASISPowderDiffraction(RunNumbers='74799',
                               FluxNormalizationType='Monitor',
                               OutputWorkspace='powder_Mon',
                               MaskFile='BASIS_Mask_default_diff.xml')
        BASISPowderDiffraction(RunNumbers='74799',
                               FluxNormalizationType='Proton Charge',
                               OutputWorkspace='powder_Pro',
                               MaskFile='BASIS_Mask_default_diff.xml')
        Divide(LHSWorkspace='powder_Pro', RHSWorkspace='powder_Mon',
               OutputWorkspace='powder_ratio')
        ReplaceSpecialValues(InputWorkspace='powder_ratio',
                             NaNValue=1.0, NaNError=1.0,
                             OutputWorkspace='powder_ratio')
    finally:
        self.preptear()

def _resample_background(
    self,
    current_background,
    current_workspace,
    make_name,
    x_min,
    x_max,
    resampled_calibration,
):
    """Perform resample on given background"""
    # create unique name for this background
    outname = str(current_background) + str(current_workspace)
    self.temp_workspace_list.append(outname)

    self._to_spectrum_axis_resample(current_background, outname, make_name,
                                    current_workspace, x_min, x_max)

    if resampled_calibration:
        Divide(
            LHSWorkspace=outname,
            RHSWorkspace=resampled_calibration,
            OutputWorkspace=outname,
            EnableLogging=False,
        )
        cal = self.getProperty("CalibrationWorkspace").valueAsStr
        Scale(
            InputWorkspace=outname,
            OutputWorkspace=outname,
            Factor=self._get_scale(cal) / self._get_scale(current_background),
            EnableLogging=False,
        )

    Scale(
        InputWorkspace=outname,
        OutputWorkspace=outname,
        Factor=self.getProperty("BackgroundScale").value,
        EnableLogging=False,
    )

    return outname

def _waterCalibration(self, ws):
    """Divide ws by a (water) reference workspace."""
    if self.getProperty(Prop.WATER_REFERENCE).isDefault:
        return ws
    waterWS = self.getProperty(Prop.WATER_REFERENCE).value
    # validate that the water reference is compatible with the input workspace
    if waterWS.getNumberHistograms() != ws.getNumberHistograms():
        self.log().error('Water workspace and run do not have the same number of histograms.')
    rebinnedWaterWSName = self._names.withSuffix('water_rebinned')
    rebinnedWaterWS = RebinToWorkspace(WorkspaceToRebin=waterWS,
                                       WorkspaceToMatch=ws,
                                       OutputWorkspace=rebinnedWaterWSName,
                                       EnableLogging=self._subalgLogging)
    calibratedWSName = self._names.withSuffix('water_calibrated')
    calibratedWS = Divide(LHSWorkspace=ws,
                          RHSWorkspace=rebinnedWaterWS,
                          OutputWorkspace=calibratedWSName,
                          EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(rebinnedWaterWS)
    self._cleanup.cleanup(ws)
    return calibratedWS

def convert_to_2theta(ws_name, num_bins=1000):
    """Convert a workspace to a 2theta axis and normalize it by a unit vanadium workspace."""
    # duplicate for vanadium
    vanadium = CloneWorkspace(InputWorkspace=ws_name, OutputWorkspace='vanadium')

    # transfer to 2theta for data
    ConvertSpectrumAxis(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target='Theta')
    Transpose(InputWorkspace=ws_name, OutputWorkspace=ws_name)
    ResampleX(InputWorkspace=ws_name, OutputWorkspace=ws_name, NumberBins=num_bins,
              PreserveEvents=False)

    # vanadium: set to 1 for now
    time_van_start = time.time()
    for iws in range(vanadium.getNumberHistograms()):
        vanadium.dataY(iws)[0] = 1.
    time_van_stop = time.time()
    ConvertSpectrumAxis(InputWorkspace='vanadium', OutputWorkspace='vanadium', Target='Theta')
    Transpose(InputWorkspace='vanadium', OutputWorkspace='vanadium')
    ResampleX(InputWorkspace='vanadium', OutputWorkspace='vanadium', NumberBins=num_bins,
              PreserveEvents=False)

    norm_ws_name = ws_name + '_normalized'
    Divide(LHSWorkspace=ws_name, RHSWorkspace='vanadium', OutputWorkspace=norm_ws_name)

    print('Create vanadium workspace : {} seconds'.format(time_van_stop - time_van_start))

    return norm_ws_name

def _integrateElasticPeaks(ws, eppWS, sigmaMultiplier, wsNames, wsCleanup, algorithmLogging):
    """Return a workspace integrated around the elastic peak."""
    histogramCount = ws.getNumberHistograms()
    integrationBegins = numpy.empty(histogramCount)
    integrationEnds = numpy.empty(histogramCount)
    for i in range(histogramCount):
        eppRow = eppWS.row(i)
        if eppRow['FitStatus'] != 'success':
            integrationBegins[i] = 0
            integrationEnds[i] = 0
            continue
        peakCentre = eppRow['PeakCentre']
        sigma = eppRow['Sigma']
        integrationBegins[i] = peakCentre - sigmaMultiplier * sigma
        integrationEnds[i] = peakCentre + sigmaMultiplier * sigma
    integratedElasticPeaksWSName = wsNames.withSuffix('integrated_elastic_peak')
    integratedElasticPeaksWS = Integration(InputWorkspace=ws,
                                           OutputWorkspace=integratedElasticPeaksWSName,
                                           IncludePartialBins=True,
                                           RangeLowerList=integrationBegins,
                                           RangeUpperList=integrationEnds,
                                           EnableLogging=algorithmLogging)
    solidAngleWSName = wsNames.withSuffix('detector_solid_angles')
    solidAngleWS = SolidAngle(InputWorkspace=ws,
                              OutputWorkspace=solidAngleWSName,
                              EnableLogging=algorithmLogging)
    solidAngleCorrectedElasticPeaksWSName = wsNames.withSuffix('solid_angle_corrected_elastic_peak')
    solidAngleCorrectedElasticPeaksWS = Divide(LHSWorkspace=integratedElasticPeaksWS,
                                               RHSWorkspace=solidAngleWS,
                                               OutputWorkspace=solidAngleCorrectedElasticPeaksWSName,
                                               EnableLogging=algorithmLogging)
    wsCleanup.cleanup(integratedElasticPeaksWS)
    wsCleanup.cleanup(solidAngleWS)
    return solidAngleCorrectedElasticPeaksWS

def _run_focus(input_workspace, tof_output_name, curves, grouping_ws, region_calib) -> None:
    """Focus the processed full instrument workspace over the chosen region of interest

    :param input_workspace: Processed full instrument workspace converted to dSpacing
    :param tof_output_name: Name for the time-of-flight output workspace
    :param curves: Workspace containing the vanadium curves for this region of interest
    :param grouping_ws: Grouping workspace to pass to DiffractionFocussing
    :param region_calib: Region of interest calibration workspace (table ws output from PDCalibration)
    """
    # rename workspace prior to focussing to avoid errors later
    dspacing_output_name = tof_output_name + "_dSpacing"
    # focus sample over specified region of interest
    focused_sample = DiffractionFocussing(InputWorkspace=input_workspace,
                                          OutputWorkspace=dspacing_output_name,
                                          GroupingWorkspace=grouping_ws)
    curves_rebinned = RebinToWorkspace(WorkspaceToRebin=curves, WorkspaceToMatch=focused_sample)
    # flux correction - divide focused sample data by rebinned focused vanadium curve data
    Divide(LHSWorkspace=focused_sample, RHSWorkspace=curves_rebinned,
           OutputWorkspace=focused_sample, AllowDifferentNumberSpectra=True)
    # apply calibration from specified region of interest
    ApplyDiffCal(InstrumentWorkspace=focused_sample, CalibrationWorkspace=region_calib)
    # set bankid for use in fit tab
    run = focused_sample.getRun()
    if region_calib.name() == "engggui_calibration_bank_1":
        run.addProperty("bankid", 1, True)
    elif region_calib.name() == "engggui_calibration_bank_2":
        run.addProperty("bankid", 2, True)
    else:
        run.addProperty("bankid", 3, True)
    # output in both dSpacing and TOF
    ConvertUnits(InputWorkspace=focused_sample, OutputWorkspace=tof_output_name, Target='TOF')
    DeleteWorkspace(curves_rebinned)

def scale_detectors(workspace_name, e_mode='Indirect'):
    """
    Scales detectors by monitor intensity.

    @param workspace_name Name of detector workspace
    @param e_mode Energy mode (Indirect for spectroscopy, Elastic for diffraction)
    """
    from mantid.simpleapi import (ConvertUnits, RebinToWorkspace, Divide)

    monitor_workspace_name = workspace_name + '_mon'

    ConvertUnits(InputWorkspace=workspace_name, OutputWorkspace=workspace_name,
                 Target='Wavelength', EMode=e_mode)

    RebinToWorkspace(WorkspaceToRebin=workspace_name,
                     WorkspaceToMatch=monitor_workspace_name,
                     OutputWorkspace=workspace_name)

    Divide(LHSWorkspace=workspace_name, RHSWorkspace=monitor_workspace_name,
           OutputWorkspace=workspace_name)

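# --- Hypothetical usage sketch (not part of the original source) ---
# scale_detectors expects a monitor workspace named '<workspace_name>_mon' to already
# exist in the ADS next to the detector workspace; the name below is a placeholder.
scale_detectors('irs00001_raw', e_mode='Indirect')
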