Example #1
    def _resample_calibration(
        self, current_workspace, mask_name, x_min, x_max,
    ):
        """Perform resample on calibration"""
        cal = self.getProperty("CalibrationWorkspace").value
        target = self.getProperty("Target").value
        e_fixed = self.getProperty("EFixed").value
        number_bins = self.getProperty("NumberBins").value

        _ws_cal = ExtractUnmaskedSpectra(
            InputWorkspace=cal, MaskWorkspace=mask_name, EnableLogging=False
        )
        if isinstance(_ws_cal, IEventWorkspace):
            _ws_cal = Integration(InputWorkspace=_ws_cal, EnableLogging=False)
        CopyInstrumentParameters(
            InputWorkspace=current_workspace,
            OutputWorkspace=_ws_cal,
            EnableLogging=False,
        )
        _ws_cal = ConvertSpectrumAxis(
            InputWorkspace=_ws_cal, Target=target, EFixed=e_fixed, EnableLogging=False,
        )
        _ws_cal = Transpose(InputWorkspace=_ws_cal, EnableLogging=False)
        return ResampleX(
            InputWorkspace=_ws_cal,
            XMin=x_min,
            XMax=x_max,
            NumberBins=number_bins,
            EnableLogging=False,
        )
Example #2
    def _findLine(self, ws):
        """Return a peak position workspace."""
        # TODO There should be a better algorithm in Mantid to achieve this.
        integratedWSName = self._names.withSuffix('integrated')
        integratedWS = Integration(InputWorkspace=ws,
                                   OutputWorkspace=integratedWSName,
                                   EnableLogging=self._subalgLogging)
        transposedWSName = self._names.withSuffix('transposed')
        transposedWS = Transpose(InputWorkspace=integratedWS,
                                 OutputWorkspace=transposedWSName,
                                 EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(integratedWS)
        # Convert spectrum numbers to WS indices.
        wsIndices = numpy.arange(0, ws.getNumberHistograms())
        xs = transposedWS.dataX(0)
        ys = transposedWS.readY(0)
        numpy.copyto(xs, wsIndices)
        indexOfMax = ys.argmax()
        heightGuess = ys[indexOfMax]
        posGuess = xs[indexOfMax]
        sigmaGuess = 3
        f = 'name=Gaussian, PeakCentre={}, Height={}, Sigma={}'.format(
            posGuess, heightGuess, sigmaGuess)
        fitResult = Fit(Function=f,
                        InputWorkspace=transposedWS,
                        EnableLogging=self._subalgLogging)
        self._cleanup.cleanup(transposedWS)
        peakPos = fitResult.Function.PeakCentre
        posTable = self._createFakePeakPositionTable(peakPos)
        return posTable
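The same Integration -> Transpose -> Fit pattern can be exercised standalone. A minimal sketch, assuming a running Mantid session; the synthetic data and starting guesses below are purely illustrative:

import numpy
from mantid.simpleapi import CreateWorkspace, Fit, Integration, Transpose

# 32 single-bin spectra whose counts trace a Gaussian across the spectrum axis.
n = 32
data_x = numpy.tile([0.0, 1.0], n)
data_y = numpy.exp(-0.5 * ((numpy.arange(n) - 12.0) / 3.0) ** 2)
ws = CreateWorkspace(DataX=data_x, DataY=data_y, NSpec=n)

# Integrate each spectrum to one bin, then transpose so the spectrum axis becomes X.
transposed = Transpose(InputWorkspace=Integration(InputWorkspace=ws))
xs = transposed.readX(0)
ys = transposed.readY(0)
i_max = ys.argmax()
f = 'name=Gaussian, PeakCentre={}, Height={}, Sigma=3'.format(xs[i_max], ys[i_max])
fit_result = Fit(Function=f, InputWorkspace=transposed)
print(fit_result.Function.PeakCentre)  # close to spectrum number 13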
Example #3
    def _monitor_normalization(self, w, target):
        """
        Divide data by integrated monitor intensity

        Parameters
        ----------
        w: Mantid.EventWorkspace
            Input workspace
        target: str
            Specify the entity the workspace refers to. Valid options are
            'sample', 'background', and 'vanadium'

        Returns
        -------
        Mantid.EventWorkspace
        """
        _t_mon = self._load_monitors(target)
        _t_mon = ConvertUnits(_t_mon, Target='Wavelength', Emode='Elastic')
        _t_mon = CropWorkspace(_t_mon,
                               XMin=self._wavelength_band[0],
                               XMax=self._wavelength_band[1])
        _t_mon = OneMinusExponentialCor(_t_mon,
                                        C='0.20749999999999999',
                                        C1='0.001276')
        _t_mon = Scale(_t_mon, Factor='1e-06', Operation='Multiply')
        _t_mon = Integration(_t_mon)  # total monitor count
        _t_w = Divide(w, _t_mon, OutputWorkspace=w.name())
        return _t_w
Example #4
def load_banks(run: Union[int, str], bank_selection: str, output_workspace: str) -> Workspace2D:
    r"""
    Load events only for the selected banks, and don't load metadata.

    If the file is not an events file, but a Nexus processed file, the bank_selection is ignored.
    :param run: run-number or filename to an Event nexus file or a processed nexus file
    :param bank_selection: selection string, such as '10,12-15,17-21'
    :param output_workspace: name of the output workspace containing counts per pixel
    :return: workspace containing counts per pixel. Events in each pixel are integrated into neutron counts.
    """
    # Resolve the input run
    if isinstance(run, int):
        file_descriptor = f'CORELLI_{run}'
    else:  # a run number given as a string, or the path to a file
        try:
            file_descriptor = f'CORELLI_{str(int(run))}'
        except ValueError:  # run is path to a file
            filename = run
            assert path.exists(filename), f'File {filename} does not exist'
            file_descriptor = filename

    bank_names = ','.join(['bank' + b for b in bank_numbers(bank_selection)])
    try:
        LoadEventNexus(Filename=file_descriptor, OutputWorkspace=output_workspace,
                       BankName=bank_names, LoadMonitors=False, LoadLogs=True)
    except (RuntimeError, ValueError):
        LoadNexusProcessed(Filename=file_descriptor, OutputWorkspace=output_workspace)
    Integration(InputWorkspace=output_workspace, OutputWorkspace=output_workspace)
    return mtd[output_workspace]
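Hypothetical usage of load_banks; the run number and bank selection are placeholders and assume access to CORELLI event files:

counts = load_banks(12345, bank_selection='10,12-15,17-21',
                    output_workspace='counts_per_pixel')
print(counts.getNumberHistograms(), 'pixels integrated')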
Example #5
def plot_specular_pixel_check(input_workspace: EventWorkspace,
                              flood_workspace: EventWorkspace, ax):
    flooded_ws = ApplyFloodWorkspace(input_workspace, flood_workspace)

    integrated = Integration(flooded_ws,
                             RangeLower=9000,
                             RangeUpper=88000,
                             StartWorkspaceIndex=70,
                             EndWorkspaceIndex=95)

    integrated_transposed = Transpose(integrated)

    def _1gaussian(x, ampl, cent, sigma):
        # Normalised Gaussian: ampl / (sigma * sqrt(2*pi)) * exp(-(x - cent)^2 / (2*sigma^2))
        return ampl / (sigma * np.sqrt(2 * np.pi)) * np.exp(
            -((x - cent)**2) / (2 * sigma**2))

    xval = integrated_transposed.readX(0)
    yval = integrated_transposed.readY(0)
    popt_gauss, pcov_gauss = optimize.curve_fit(_1gaussian,
                                                xval,
                                                yval,
                                                p0=[56000, 86, 0.8])
    perr_gauss = np.sqrt(np.diag(pcov_gauss))

    fit_yvals = _1gaussian(xval, *popt_gauss)

    ax.plot(xval, yval, "rx")
    ax.plot(xval, fit_yvals, 'k--')
    ax.axvline(x=86.0, color='b', linestyle='--')
    ax.set_xlabel("Spectrum")
    ax.set_ylabel("Counts")
    max_pos = fit_yvals.argmax()
    annot_y = fit_yvals[max_pos]
    annot_x = xval[max_pos]
    ax.annotate(f"X:{annot_x}, Y:{annot_y}",
                xy=(annot_x, annot_y),
                xytext=(annot_x * 1.02, annot_y))
    ax.minorticks_on()
    ax.grid(True, which="both")
    ax.set_title("Specular pixel")

    # make interactive plotly figure
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=xval,
                   y=yval,
                   name="Data",
                   mode="markers",
                   marker_symbol=4))
    fig.add_trace(go.Scatter(x=xval, y=fit_yvals, mode="lines", name="Fit"))
    fig.add_vline(x=86, line_dash="dash", line_color="blue")
    fig.update_layout(xaxis_title="Spectrum",
                      yaxis_title="Counts",
                      width=600,
                      title_text="Specular pixel",
                      title_x=0.5)
    return fig
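A sketch of how this check might be driven; input_ws and flood_ws are placeholder workspaces that would come from an earlier reduction step:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
plotly_fig = plot_specular_pixel_check(input_ws, flood_ws, ax)
fig.savefig('specular_pixel.png')             # static matplotlib version
plotly_fig.write_html('specular_pixel.html')  # interactive plotly version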
Example #6
def _integrateBkgs(ws, eppWS, sigmaMultiplier, wsNames, wsCleanup,
                   algorithmLogging):
    """Return a workspace integrated around flat background areas."""
    histogramCount = ws.getNumberHistograms()
    binMatrix = ws.extractX()
    leftBegins = binMatrix[:, 0]
    leftEnds = numpy.empty(histogramCount)
    rightBegins = numpy.empty(histogramCount)
    rightEnds = binMatrix[:, -1]
    for i in range(histogramCount):
        eppRow = eppWS.row(i)
        if eppRow['FitStatus'] != 'success':
            leftBegins[i] = 0
            leftEnds[i] = 0
            rightBegins[i] = 0
            rightEnds[i] = 0
            continue
        peakCentre = eppRow['PeakCentre']
        sigma = eppRow['Sigma']
        leftEnds[i] = peakCentre - sigmaMultiplier * sigma
        if leftBegins[i] > leftEnds[i]:
            leftBegins[i] = leftEnds[i]
        rightBegins[i] = peakCentre + sigmaMultiplier * sigma
        if rightBegins[i] > rightEnds[i]:
            rightBegins[i] = rightEnds[i]
    leftWSName = wsNames.withSuffix('integrated_left_bkgs')
    leftWS = Integration(InputWorkspace=ws,
                         OutputWorkspace=leftWSName,
                         RangeLowerList=leftBegins,
                         RangeUpperList=leftEnds,
                         EnableLogging=algorithmLogging)
    rightWSName = wsNames.withSuffix('integrated_right_bkgs')
    rightWS = Integration(InputWorkspace=ws,
                          OutputWorkspace=rightWSName,
                          RangeLowerList=rightBegins,
                          RangeUpperList=rightEnds,
                          EnableLogging=algorithmLogging)
    sumWSName = wsNames.withSuffix('integrated_bkgs_sum')
    sumWS = Plus(LHSWorkspace=leftWS,
                 RHSWorkspace=rightWS,
                 OutputWorkspace=sumWSName,
                 EnableLogging=algorithmLogging)
    wsCleanup.cleanup(leftWS)
    wsCleanup.cleanup(rightWS)
    return sumWS
Example #7
    def PyExec(self):
        from mantid.simpleapi import CropWorkspace, Integration, DeleteWorkspace

        in_ws = self.getPropertyValue("InputWorkspace")
        min_wavelength = self.getPropertyValue("StartWavelength")
        keep_workspaces = self.getPropertyValue("KeepIntermediateWorkspaces")

        # Crop off lower wavelengths where the signal is also lower.
        cropped_ws = CropWorkspace(InputWorkspace=in_ws,
                                   XMin=float(min_wavelength))
        # Integrate over the higher wavelengths after cropping.
        summed_ws = Integration(InputWorkspace=cropped_ws)
        # Loop through each histogram, and fetch out each intensity value from the single bin to generate a list of all values.
        n_histograms = summed_ws.getNumberHistograms()
        y_data = np.empty([n_histograms])
        for i in range(0, n_histograms):
            intensity = summed_ws.readY(i)[0]
            y_data[i] = intensity
        # Remove the background.
        y_data = self.__remove_background(y_data)
        # Find the peaks.
        peak_index_list = self.__find_peak_spectrum_numbers(y_data, summed_ws)
        # Reverse the order so that it goes from high spec number to low spec number.
        peak_index_list.reverse()
        n_peaks_found = len(peak_index_list)

        output_ws = WorkspaceFactory.createTable("TableWorkspace")
        output_ws.addColumn("int", "Reflected Spectrum Number")

        if n_peaks_found > 2:
            raise PeakFindingException("Found more than two peaks.")
        elif n_peaks_found == 0:
            raise PeakFindingException("No peaks found")
        elif n_peaks_found == 1:
            output_ws.addRow(peak_index_list)
        elif n_peaks_found == 2:
            output_ws.addColumn("int", "Transmission Spectrum Number")
            output_ws.addRow(peak_index_list)

        if int(keep_workspaces) == 0:
            DeleteWorkspace(Workspace=cropped_ws)
            DeleteWorkspace(Workspace=summed_ws)

        self.setProperty("OutputWorkspace", output_ws)
Example #8
    def _locate_global_xlimit(self):
        """Find the global bin from all spectrum"""
        input_workspaces = self.getProperty("InputWorkspace").value
        mask = self.getProperty("MaskWorkspace").value
        mask_angle = self.getProperty("MaskAngle").value
        target = self.getProperty("Target").value
        e_fixed = self.getProperty("EFixed").value

        # NOTE:
        # Due to range differences among the incoming spectra, a common set of bin
        # parameters is needed so that all data can be binned exactly the same way.
        _xMin, _xMax = 1e16, -1e16

        # BEGIN_FOR: located_global_xMin&xMax
        for n, _wsn in enumerate(input_workspaces):
            _ws = AnalysisDataService.retrieve(_wsn)
            _mskn = f"__mask_{n}"
            self.temp_workspace_list.append(_mskn)

            ExtractMask(_ws, OutputWorkspace=_mskn, EnableLogging=False)
            if mask_angle != Property.EMPTY_DBL:
                MaskAngle(
                    Workspace=_mskn,
                    MinAngle=mask_angle,
                    Angle="Phi",
                    EnableLogging=False,
                )
            if mask is not None:
                BinaryOperateMasks(
                    InputWorkspace1=_mskn,
                    InputWorkspace2=mask,
                    OperationType="OR",
                    OutputWorkspace=_mskn,
                    EnableLogging=False,
                )

            _ws_tmp = ExtractUnmaskedSpectra(
                InputWorkspace=_ws, MaskWorkspace=_mskn, EnableLogging=False
            )
            if isinstance(mtd["_ws_tmp"], IEventWorkspace):
                _ws_tmp = Integration(InputWorkspace=_ws_tmp, EnableLogging=False)
            _ws_tmp = ConvertSpectrumAxis(
                InputWorkspace=_ws_tmp,
                Target=target,
                EFixed=e_fixed,
                EnableLogging=False,
            )
            _ws_tmp = Transpose(
                InputWorkspace=_ws_tmp, OutputWorkspace=f"__ws_{n}", EnableLogging=False
            )

            _xMin = min(_xMin, _ws_tmp.readX(0).min())
            _xMax = max(_xMax, _ws_tmp.readX(0).max())
        # END_FOR: located_global_xMin&xMax

        return _xMin, _xMax
Example #9
def _normalise_by_integral(workspace):
    integrated = Integration(InputWorkspace=workspace,
                             OutputWorkspace="__integral",
                             StoreInADS=False,
                             EnableLogging=False)
    return Divide(LHSWorkspace=workspace,
                  RHSWorkspace=integrated,
                  OutputWorkspace="__divided",
                  StoreInADS=False,
                  EnableLogging=False)
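A minimal sketch, assuming a running Mantid session: after dividing by the integral, the counts in each spectrum sum to one.

from mantid.simpleapi import CreateSampleWorkspace

ws = CreateSampleWorkspace(NumBanks=1, BankPixelWidth=2)
normalised = _normalise_by_integral(ws)
print(normalised.readY(0).sum())  # approximately 1.0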
Example #11
def _calculate_vanadium_integral(van_ws, run_no):  # -> Workspace
    """
    Calculate the integral of the normalised vanadium workspace
    :param van_ws: Chosen vanadium run workspace
    :param run_no: Run number, used to name the output workspace
    :return: Integrated workspace
    """
    # create the new output workspace name here
    ws = NormaliseByCurrent(InputWorkspace=van_ws,
                            OutputWorkspace=str(run_no) + "_" + INTEGRATED_WORKSPACE_NAME)
    # sensitivity correction for vanadium
    ws_integ = Integration(InputWorkspace=ws, OutputWorkspace=ws)
    ws_integ /= van_ws.blocksize()
    return ws_integ
Example #12
    def _convert_to_angle(self, w, name):
        """
        Output the integrated intensity for each elastic detector versus
        detector angle with the neutron beam.

        Masked elastic detectors are assigned a zero intensity

        Parameters
        ----------
        w: Mantid.MatrixWorkspace2D
        name: str
            Name of output workspace
        Returns
        -------
        Mantid.MatrixWorkspace2D
        """
        id_s, id_e = 16386, 17534  # start and end of the elastic detector IDs
        _t_w_name = tws('convert_to_angle')
        _t_w = Integration(w, OutputWorkspace=_t_w_name)
        sp = _t_w.spectrumInfo()
        x, y, e = [list(), list(), list()]
        for i in range(_t_w.getNumberHistograms()):
            id_i = _t_w.getDetector(i).getID()
            if id_s <= id_i <= id_e:
                x.append(np.degrees(sp.twoTheta(i)))
                if sp.isMasked(i):
                    y.append(0.0)
                    e.append(1.0)
                else:
                    y.append(_t_w.readY(i)[0])
                    e.append(_t_w.readE(i)[0])
        x = np.asarray(x)
        y = np.asarray(y)
        e = np.asarray(e)
        od = np.argsort(x)  # order in ascending angles
        title = 'Angle between detector and incoming neutron beam'
        _t_w = CreateWorkspace(DataX=x[od],
                               DataY=y[od],
                               DataE=e[od],
                               NSpec=1,
                               UnitX='Degrees',
                               WorkspaceTitle=title,
                               OutputWorkspace=_t_w_name)
        RenameWorkspace(_t_w, OutputWorkspace=name)
        return _t_w
Example #13
    def _to_spectrum_axis(self,
                          workspace_in,
                          workspace_out,
                          mask,
                          instrument_donor=None):
        target = self.getProperty("Target").value
        wavelength = self.getProperty("Wavelength").value
        e_fixed = UnitConversion.run('Wavelength', 'Energy', wavelength, 0, 0,
                                     0, Elastic, 0)

        ExtractUnmaskedSpectra(
            InputWorkspace=workspace_in,
            OutputWorkspace=workspace_out,
            MaskWorkspace=mask,
            EnableLogging=False,
        )

        if isinstance(mtd[workspace_out], IEventWorkspace):
            Integration(
                InputWorkspace=workspace_out,
                OutputWorkspace=workspace_out,
                EnableLogging=False,
            )

        if instrument_donor:
            CopyInstrumentParameters(
                InputWorkspace=instrument_donor,
                OutputWorkspace=workspace_out,
                EnableLogging=False,
            )

        ConvertSpectrumAxis(
            InputWorkspace=workspace_out,
            OutputWorkspace=workspace_out,
            Target=target,
            EFixed=e_fixed,
            EnableLogging=False,
        )

        Transpose(
            InputWorkspace=workspace_out,
            OutputWorkspace=workspace_out,
            EnableLogging=False,
        )

        return workspace_out
Example #14
    def _load_vanadium_runs(self):
        """
        Initialize the vanadium workspace and the related mask to avoid using
        pixels with low counts.
        """
        runs = self.getProperty('VanadiumRuns').value
        _t_van_name = tws('vanadium')
        _t_van = self._load_runs(runs, _t_van_name)
        _t_van = self._apply_corrections(_t_van, target='vanadium')
        _t_van = Integration(_t_van,
                             RangeLower=self._wavelength_band[0],
                             RangeUpper=self._wavelength_band[1],
                             OutputWorkspace=_t_van_name)
        _t_v_mask_name = tws('vanadium_mask')
        output = MedianDetectorTest(_t_van, OutputWorkspace=_t_v_mask_name)
        self._v_mask = output.OutputWorkspace
        MaskDetectors(_t_van, MaskedWorkspace=self._v_mask)
        self._van = _t_van
Example #15
    def _integrate(self, mainWS, wsCleanup, subalgLogging):
        """Integrate mainWS applying Debye-Waller correction, if requested."""
        eppWS = self.getProperty(common.PROP_EPP_WS).value
        calibrationWS = self.getPropertyValue(common.PROP_OUTPUT_WS)
        if self.getProperty(common.PROP_DWF_CORRECTION).value == common.DWF_ON:
            if not self.getProperty(common.PROP_TEMPERATURE).isDefault:
                temperature = self.getProperty(common.PROP_TEMPERATURE).value
            else:
                temperature = 293.0
                ILL_TEMPERATURE_ENTRY = 'sample.temperature'
                if mainWS.run().hasProperty(ILL_TEMPERATURE_ENTRY):
                    temperatureProperty = mainWS.run().getProperty(
                        ILL_TEMPERATURE_ENTRY)
                    if hasattr(temperatureProperty, 'getStatistics'):
                        temperature = temperatureProperty.getStatistics().mean
                    else:
                        temperature = temperatureProperty.value
            calibrationWS = ComputeCalibrationCoefVan(
                VanadiumWorkspace=mainWS,
                EPPTable=eppWS,
                OutputWorkspace=calibrationWS,
                Temperature=temperature,
                EnableLogging=subalgLogging)
            wsCleanup.cleanup(mainWS)
            return calibrationWS
        # No DWF correction - integrate manually.
        # TODO revise when ComputeCalibrationCoefVan supports this option.
        size = eppWS.rowCount()
        starts = numpy.zeros(size)
        ends = numpy.zeros(size)
        for i in range(size):
            row = eppWS.row(i)
            if row['FitStatus'] == 'success':
                fwhm = 2.0 * numpy.sqrt(2.0 * numpy.log(2.0)) * row['Sigma']
                centre = row['PeakCentre']
                starts[i] = centre - 3.0 * fwhm
                ends[i] = centre + 3.0 * fwhm
        calibrationWS = Integration(InputWorkspace=mainWS,
                                    OutputWorkspace=calibrationWS,
                                    RangeLowerList=starts,
                                    RangeUpperList=ends,
                                    EnableLogging=subalgLogging)
        wsCleanup.cleanup(mainWS)
        return calibrationWS
Example #16
File: wand.py Project: rosswhitfield/wand
def loadIntegrateData(filename, OutputWorkspace='__ws', wavelength=1.488):
    LoadEventNexus(Filename=filename,
                   OutputWorkspace=OutputWorkspace,
                   LoadMonitors=True)
    Integration(InputWorkspace=OutputWorkspace,
                OutputWorkspace=OutputWorkspace)
    MaskDetectors(OutputWorkspace, DetectorList=range(16384))
    mtd[OutputWorkspace].getAxis(0).setUnit("Wavelength")
    w = np.array([wavelength - 0.001, wavelength + 0.001])
    for idx in range(mtd[OutputWorkspace].getNumberHistograms()):
        mtd[OutputWorkspace].setX(idx, w)
    SetGoniometer(OutputWorkspace, Axis0="HB2C:Mot:s1,0,1,0,1")
    AddSampleLog(OutputWorkspace,
                 LogName="gd_prtn_chrg",
                 LogType='Number',
                 NumberType='Double',
                 LogText=str(mtd[OutputWorkspace +
                                 '_monitors'].getNumberEvents()))
    return OutputWorkspace
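Placeholder usage; the filename is hypothetical and assumes HB2C (WAND) event data on disk:

ws_name = loadIntegrateData('HB2C_12345.nxs.h5',  # hypothetical file
                            OutputWorkspace='wand_data',
                            wavelength=1.488)
print(mtd[ws_name].getNumberHistograms())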
Example #17
def load_banks(filename: str, bank_selection: str, output_workspace: str) -> Workspace2D:
    r"""
    Load events only for the selected banks, and don't load metadata.

    If the file is not an events file, but a Nexus processed file, the bank_selection is ignored.
    :param filename: Filename to an Event nexus file or a processed nexus file
    :param bank_selection: selection string, such as '10,12-15,17-21'
    :param output_workspace: name of the output workspace containing counts per pixel
    :return: workspace containing counts per pixel. Events in each pixel are integrated into neutron counts.
    """
    assert path.exists(filename), f'File {filename} does not exist'
    bank_names = ','.join(['bank' + b for b in bank_numbers(bank_selection)])
    try:
        LoadEventNexus(Filename=filename, OutputWorkspace=output_workspace,
                       BankName=bank_names, LoadMonitors=False, LoadLogs=False)
    except (RuntimeError, ValueError):
        LoadNexusProcessed(Filename=filename, OutputWorkspace=output_workspace)
    Integration(InputWorkspace=output_workspace, OutputWorkspace=output_workspace)
    return mtd[output_workspace]
Example #18
File: LoadWAND.py Project: freephys/mantid
    def PyExec(self):
        filename = self.getProperty("Filename").value
        wavelength = self.getProperty("wavelength").value
        outWS = self.getPropertyValue("OutputWorkspace")

        LoadEventNexus(Filename=filename,
                       OutputWorkspace=outWS,
                       LoadMonitors=True)
        Integration(InputWorkspace=outWS, OutputWorkspace=outWS)

        if self.getProperty("ApplyMask").value:
            MaskBTP(outWS, Bank='8', Tube='449-480')
            MaskBTP(outWS, Pixel='1,2,511,512')

        mtd[outWS].getAxis(0).setUnit("Wavelength")
        w = [wavelength - 0.001, wavelength + 0.001]
        for idx in range(mtd[outWS].getNumberHistograms()):
            mtd[outWS].setX(idx, w)

        SetGoniometer(outWS, Axis0="HB2C:Mot:s1,0,1,0,1")
        AddSampleLog(outWS,
                     LogName="gd_prtn_chrg",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(mtd[outWS + '_monitors'].getNumberEvents()))
        DeleteWorkspace(outWS + '_monitors')

        AddSampleLog(outWS,
                     LogName="Wavelength",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(wavelength))
        AddSampleLog(outWS,
                     LogName="Ei",
                     LogType='Number',
                     NumberType='Double',
                     LogText=str(
                         UnitConversion.run('Wavelength', 'Energy', wavelength,
                                            0, 0, 0, Elastic, 0)))

        self.setProperty('OutputWorkspace', outWS)
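LoadWAND is reached through simpleapi like any registered algorithm; a hedged sketch with a placeholder filename, using only the property names visible in the snippet above:

from mantid.simpleapi import LoadWAND

ws = LoadWAND(Filename='HB2C_12345.nxs.h5',  # hypothetical file
              wavelength=1.488,
              ApplyMask=True,
              OutputWorkspace='wand_ws')
print(ws.run().getLogData('Ei').value)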
Example #19
def _integrateElasticPeaks(ws, eppWS, sigmaMultiplier, wsNames, wsCleanup,
                           algorithmLogging):
    """Return a workspace integrated around the elastic peak."""
    histogramCount = ws.getNumberHistograms()
    integrationBegins = numpy.empty(histogramCount)
    integrationEnds = numpy.empty(histogramCount)
    for i in range(histogramCount):
        eppRow = eppWS.row(i)
        if eppRow['FitStatus'] != 'success':
            integrationBegins[i] = 0
            integrationEnds[i] = 0
            continue
        peakCentre = eppRow['PeakCentre']
        sigma = eppRow['Sigma']
        integrationBegins[i] = peakCentre - sigmaMultiplier * sigma
        integrationEnds[i] = peakCentre + sigmaMultiplier * sigma
    integratedElasticPeaksWSName = \
        wsNames.withSuffix('integrated_elastic_peak')
    integratedElasticPeaksWS = \
        Integration(InputWorkspace=ws,
                    OutputWorkspace=integratedElasticPeaksWSName,
                    IncludePartialBins=True,
                    RangeLowerList=integrationBegins,
                    RangeUpperList=integrationEnds,
                    EnableLogging=algorithmLogging)
    solidAngleWSName = wsNames.withSuffix('detector_solid_angles')
    solidAngleWS = SolidAngle(InputWorkspace=ws,
                              OutputWorkspace=solidAngleWSName,
                              EnableLogging=algorithmLogging)
    solidAngleCorrectedElasticPeaksWSName = \
        wsNames.withSuffix('solid_angle_corrected_elastic_peak')
    solidAngleCorrectedElasticPeaksWS = \
        Divide(LHSWorkspace=integratedElasticPeaksWS,
               RHSWorkspace=solidAngleWS,
               OutputWorkspace=solidAngleCorrectedElasticPeaksWSName,
               EnableLogging=algorithmLogging)
    wsCleanup.cleanup(integratedElasticPeaksWS)
    wsCleanup.cleanup(solidAngleWS)
    return solidAngleCorrectedElasticPeaksWS
Example #20
    def _convert_to_angle(self, w):
        """
        Output the integrated intensity for each elastic detector versus
        detector angle with the neutron beam.

        Masked elastic detectors are assigned a zero intensity

        Parameters
        ----------
        w: Mantid.MatrixWorkspace2D

        Returns
        -------
        Mantid.MatrixWorkspace2D
        """
        id_s, id_e = 16386, 17534  # start and end of the elastic detector IDs
        _t_w = Integration(w)
        sp = _t_w.spectrumInfo()
        x, y, e = [list(), list(), list()]
        for i in range(_t_w.getNumberHistograms()):
            id_i = _t_w.getDetector(i).getID()
            if id_s <= id_i <= id_e:
                x.append(np.degrees(sp.twoTheta(i)))
                if sp.isMasked(i):
                    y.append(0.0)
                    e.append(1.0)
                else:
                    y.append(_t_w.readY(i)[0])
                    e.append(_t_w.readE(i)[0])
        x = np.asarray(x)
        y = np.asarray(y)
        e = np.asarray(e)
        od = np.argsort(x)  # order in ascending angles
        title = 'Angle between detector and incoming neutron beam'
        _t_w = CreateWorkspace(DataX=x[od], DataY=y[od], DataE=e[od],
                               NSpec=1, UnitX='Degrees',
                               WorkspaceTitle=title)
        return _t_w
Example #21
def cc_calibrate_groups(data_ws,
                        group_ws,
                        output_basename="_tmp_group_cc_calibration",
                        previous_calibration=None,
                        Step=0.001,
                        DReference=1.2615,
                        Xmin=1.22,
                        Xmax=1.30,
                        MaxDSpaceShift=None,
                        OffsetThreshold=1E-4,
                        SkipCrossCorrelation=[],
                        PeakFunction="Gaussian",
                        SmoothNPoints=0):
    """This will perform the CrossCorrelate/GetDetectorOffsets on a group
    of detector pixel.

    It works by looping over the different groups in the group_ws,
    extracting all unmasked spectra of a group, then running
    CrossCorrelate and GetDetectorOffsets on just that group, and
    combinning the results at the end. When running a group,
    CrossCorrelate and GetDetectorOffsets could be cycled until
    converging of offsets is reached, given the user input offset
    threshold. If offset threshold is specified to be equal to or
    larger than 1.0, no cycling will be carried out.

    The first unmasked spectra of the group will be used for the
    ReferenceSpectra in CrossCorrelate.

    :param data_ws: Input calibration raw data (in TOF), assumed to already be correctly masked
    :param group_ws: grouping workspace, e.g. output from LoadDetectorsGroupingFile
    :param output_basename: Optional name to use for temporary and output workspaces
    :param previous_calibration: Optional previous diffcal workspace
    :param Step: step size for binning of data and input for GetDetectorOffsets, default 0.001
    :param DReference: DReference parameter for GetDetectorOffsets, default 1.2615
    :param Xmin: Xmin parameter for CrossCorrelate, default 1.22
    :param Xmax: Xmax parameter for CrossCorrelate, default 1.30
    :param MaxDSpaceShift: MaxDSpaceShift parameter for CrossCorrelate, default None
    :param OffsetThreshold: Convergence threshold for cycling cross correlation, default 1E-4
    :param SkipCrossCorrelation: Skip cross correlation for specified groups
    :param PeakFunction: Peak function to use for extracting the offset
    :param SmoothNPoints: Number of points for smoothing spectra, for cross correlation ONLY
    :return: Combined DiffCal workspace from all the different groups
    """
    if previous_calibration:
        ApplyDiffCal(data_ws, CalibrationWorkspace=previous_calibration)

    data_d = ConvertUnits(data_ws, Target='dSpacing', OutputWorkspace='data_d')

    group_list = np.unique(group_ws.extractY())

    _accum_cc = None
    to_skip = []
    for group in group_list:
        # Figure out input parameters for CrossCorrelate and GetDetectorOffsets, specifically
        # for those parameters for which both a single value and a list are accepted. If a
        # list is given, a different parameter setup will be used for each group.
        Xmin_group = Xmin[int(group) - 1] if isinstance(Xmin, list) else Xmin
        Xmax_group = Xmax[int(group) - 1] if isinstance(Xmax, list) else Xmax
        MDS_group = MaxDSpaceShift[int(group) - 1] if isinstance(MaxDSpaceShift, list) else MaxDSpaceShift
        DRef_group = DReference[int(group) - 1] if isinstance(DReference, list) else DReference
        OT_group = OffsetThreshold[int(group) - 1] if isinstance(OffsetThreshold, list) else OffsetThreshold
        pf_group = PeakFunction[int(group) - 1] if isinstance(PeakFunction, list) else PeakFunction
        snpts_group = SmoothNPoints[int(group) - 1] if isinstance(SmoothNPoints, list) else SmoothNPoints
        cycling = OT_group < 1.0

        indexes = np.where(group_ws.extractY().flatten() == group)[0]
        sn = np.array(group_ws.getSpectrumNumbers())[indexes]
        try:
            ws_indexes = [data_d.getIndexFromSpectrumNumber(int(i)) for i in sn]
        except RuntimeError:
            # data does not contain spectrum in group
            continue

        if group in SkipCrossCorrelation:
            to_skip.extend(ws_indexes)

        ExtractSpectra(data_d, WorkspaceIndexList=ws_indexes, OutputWorkspace='_tmp_group_cc')
        ExtractUnmaskedSpectra('_tmp_group_cc', OutputWorkspace='_tmp_group_cc')
        ExtractSpectra(data_ws, WorkspaceIndexList=ws_indexes, OutputWorkspace='_tmp_group_cc_raw')
        ExtractUnmaskedSpectra('_tmp_group_cc_raw', OutputWorkspace='_tmp_group_cc_raw')
        num_spectra = mtd['_tmp_group_cc'].getNumberHistograms()
        if num_spectra < 2:
            continue
        Rebin('_tmp_group_cc', Params=f'{Xmin_group},{Step},{Xmax_group}', OutputWorkspace='_tmp_group_cc')
        if snpts_group >= 3:
            SmoothData('_tmp_group_cc', NPoints=snpts_group, OutputWorkspace='_tmp_group_cc')

        # Figure out the brightest spectrum to be used as the reference for cross correlation.
        CloneWorkspace('_tmp_group_cc_raw', OutputWorkspace='_tmp_group_cc_raw_tmp')
        intg = Integration('_tmp_group_cc_raw_tmp',
                           StartWorkspaceIndex=0,
                           EndWorkspaceIndex=num_spectra-1,
                           OutputWorkspace='_tmp_group_intg')
        brightest_spec_index = int(np.argmax(np.array([intg.readY(i)[0] for i in range(num_spectra)])))

        # Cycling cross correlation. At each step, we use the offsets and DIFCs obtained from
        # the previous step to compute new DIFCs. This way, the spectra in the group come
        # closer and closer to each other as the cycle goes on, until the convergence
        # criterion is reached. The criterion is that the median of all the non-zero offsets
        # should be smaller than the threshold (a user-tuned parameter, default 1E-4,
        # meaning 0.04% relative offset).
        num_cycle = 1
        while True:
            CrossCorrelate('_tmp_group_cc',
                           Xmin=Xmin_group, XMax=Xmax_group,
                           MaxDSpaceShift=MDS_group,
                           ReferenceSpectra=brightest_spec_index,
                           WorkspaceIndexMin=0,
                           WorkspaceIndexMax=num_spectra-1,
                           OutputWorkspace='_tmp_group_cc')

            bin_range = (Xmax_group-Xmin_group)/Step
            GetDetectorOffsets(InputWorkspace='_tmp_group_cc',
                               Step=Step,
                               Xmin=-bin_range, XMax=bin_range,
                               DReference=DRef_group,
                               MaxOffset=1,
                               PeakFunction=pf_group,
                               OutputWorkspace='_tmp_group_cc')

            if group not in SkipCrossCorrelation:
                offsets_tmp = []
                for item in ws_indexes:
                    if abs(mtd['_tmp_group_cc'].readY(item)[0]) != 0:
                        offsets_tmp.append(abs(mtd['_tmp_group_cc'].readY(item)[0]))
                offsets_tmp = np.array(offsets_tmp)
                logger.notice(f'Running group-{group}, cycle-{num_cycle}.')
                logger.notice(f'Median offset (no sign) = {np.median(offsets_tmp)}')
                converged = np.median(offsets_tmp) < OT_group
            else:
                for item in ws_indexes:
                    mtd['_tmp_group_cc'].dataY(item)[0] = 0.0
                logger.notice(f'Cross correlation skipped for group-{group}.')
                converged = True

            if not cycling or converged:
                if cycling and converged:
                    if group not in SkipCrossCorrelation:
                        logger.notice(f'Cross correlation for group-{group} converged, ')
                        logger.notice(f'with offset threshold {OT_group}.')
                break
            else:
                previous_calibration = ConvertDiffCal('_tmp_group_cc',
                                                      PreviousCalibration=previous_calibration,
                                                      OutputWorkspace='_tmp_group_cc_diffcal')
                ApplyDiffCal('_tmp_group_cc_raw', CalibrationWorkspace='_tmp_group_cc_diffcal')
                ConvertUnits('_tmp_group_cc_raw', Target='dSpacing', OutputWorkspace='_tmp_group_cc')
                Rebin('_tmp_group_cc', Params=f'{Xmin_group},{Step},{Xmax_group}', OutputWorkspace='_tmp_group_cc')

            num_cycle += 1

        if _accum_cc is None:
            _accum_cc = RenameWorkspace('_tmp_group_cc')
        else:
            _accum_cc += mtd['_tmp_group_cc']
            # DeleteWorkspace('_tmp_group_cc')

    previous_calibration = ConvertDiffCal('_accum_cc',
                                          PreviousCalibration=previous_calibration,
                                          OutputWorkspace=f'{output_basename}_cc_diffcal')

    DeleteWorkspace('_accum_cc')
    DeleteWorkspace('_tmp_group_cc')
    DeleteWorkspace('_tmp_group_cc_raw')
    if cycling and '_tmp_group_cc_diffcal' in mtd:
        DeleteWorkspace('_tmp_group_cc_diffcal')

    return mtd[f'{output_basename}_cc_diffcal'], to_skip
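A hedged usage sketch for cc_calibrate_groups; data_ws and group_ws are placeholders for a masked TOF calibration run and a grouping workspace (e.g. from CreateGroupingWorkspace):

diffcal_ws, skipped = cc_calibrate_groups(data_ws,
                                          group_ws,
                                          output_basename='my_calibration',
                                          Step=0.001,
                                          DReference=1.2615,
                                          Xmin=1.22,
                                          Xmax=1.30,
                                          OffsetThreshold=1E-4)
print(diffcal_ws.rowCount(), 'calibrated rows;', len(skipped), 'spectra skipped')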
Example #22
from mantid.simpleapi import Load, Integration
import numpy as np

ws_list = np.genfromtxt('/SNS/users/rwp/corelli/tube_calibration/list',
                        delimiter=',',
                        dtype=[('runs', '|S11'), ('banks', '5i8'),
                               ('height', 'i8')])

for run, banks, height in ws_list:
    run = run.decode()  # genfromtxt yields bytes for '|S11' fields under Python 3
    banks = np.asarray(banks)
    banks = banks[np.nonzero(banks)]
    bank_names = ','.join('bank' + str(b) for b in banks)
    print(run)
    print(banks)
    print('CORELLI_' + run)
    for bank in banks:
        data = Load(Filename='CORELLI_' + run, BankName='bank' + str(bank))
        data = Integration(data)
        data_Y = data.extractY() * -1
        for tube in range(16):
            filename = 'COR_{}_{}_{}'.format(run, bank, tube + 1)
            np.savetxt(filename + '.txt',
                       np.concatenate((np.array(range(256), ndmin=2).T,
                                       data_Y[range(256 * tube, 256 * (tube + 1))]),
                                      axis=1))
Example #23
    def PyExec(self):
        data = self.getProperty("InputWorkspace").value
        cal = self.getProperty("CalibrationWorkspace").value
        bkg = self.getProperty("BackgroundWorkspace").value
        mask = self.getProperty("MaskWorkspace").value
        target = self.getProperty("Target").value
        eFixed = self.getProperty("EFixed").value
        xMin = self.getProperty("XMin").value
        xMax = self.getProperty("XMax").value
        numberBins = self.getProperty("NumberBins").value
        normaliseBy = self.getProperty("NormaliseBy").value
        maskAngle = self.getProperty("MaskAngle").value
        outWS = self.getPropertyValue("OutputWorkspace")

        data_scale = 1
        cal_scale = 1
        bkg_scale = 1

        if normaliseBy == "Monitor":
            data_scale = data.run().getProtonCharge()
        elif normaliseBy == "Time":
            data_scale = data.run().getLogData('duration').value

        ExtractMask(data, OutputWorkspace='__mask_tmp', EnableLogging=False)

        if maskAngle != Property.EMPTY_DBL:
            MaskAngle(Workspace='__mask_tmp',
                      MinAngle=maskAngle,
                      Angle='Phi',
                      EnableLogging=False)

        if mask is not None:
            BinaryOperateMasks(InputWorkspace1='__mask_tmp',
                               InputWorkspace2=mask,
                               OperationType='OR',
                               OutputWorkspace='__mask_tmp',
                               EnableLogging=False)

        ExtractUnmaskedSpectra(InputWorkspace=data,
                               MaskWorkspace='__mask_tmp',
                               OutputWorkspace='__data_tmp',
                               EnableLogging=False)
        if isinstance(mtd['__data_tmp'], IEventWorkspace):
            Integration(InputWorkspace='__data_tmp',
                        OutputWorkspace='__data_tmp',
                        EnableLogging=False)
        ConvertSpectrumAxis(InputWorkspace='__data_tmp',
                            Target=target,
                            EFixed=eFixed,
                            OutputWorkspace=outWS,
                            EnableLogging=False)
        Transpose(InputWorkspace=outWS,
                  OutputWorkspace=outWS,
                  EnableLogging=False)
        ResampleX(InputWorkspace=outWS,
                  OutputWorkspace=outWS,
                  XMin=xMin,
                  XMax=xMax,
                  NumberBins=numberBins,
                  EnableLogging=False)

        if cal is not None:
            ExtractUnmaskedSpectra(InputWorkspace=cal,
                                   MaskWorkspace='__mask_tmp',
                                   OutputWorkspace='__cal_tmp',
                                   EnableLogging=False)
            if isinstance(mtd['__cal_tmp'], IEventWorkspace):
                Integration(InputWorkspace='__cal_tmp',
                            OutputWorkspace='__cal_tmp',
                            EnableLogging=False)
            CopyInstrumentParameters(data, '__cal_tmp', EnableLogging=False)
            ConvertSpectrumAxis(InputWorkspace='__cal_tmp',
                                Target=target,
                                EFixed=eFixed,
                                OutputWorkspace='__cal_tmp',
                                EnableLogging=False)
            Transpose(InputWorkspace='__cal_tmp',
                      OutputWorkspace='__cal_tmp',
                      EnableLogging=False)
            ResampleX(InputWorkspace='__cal_tmp',
                      OutputWorkspace='__cal_tmp',
                      XMin=xMin,
                      XMax=xMax,
                      NumberBins=numberBins,
                      EnableLogging=False)
            Divide(LHSWorkspace=outWS,
                   RHSWorkspace='__cal_tmp',
                   OutputWorkspace=outWS,
                   EnableLogging=False)
            if normaliseBy == "Monitor":
                cal_scale = cal.run().getProtonCharge()
            elif normaliseBy == "Time":
                cal_scale = cal.run().getLogData('duration').value

        Scale(InputWorkspace=outWS,
              OutputWorkspace=outWS,
              Factor=cal_scale / data_scale,
              EnableLogging=False)

        if bkg is not None:
            ExtractUnmaskedSpectra(InputWorkspace=bkg,
                                   MaskWorkspace='__mask_tmp',
                                   OutputWorkspace='__bkg_tmp',
                                   EnableLogging=False)
            if isinstance(mtd['__bkg_tmp'], IEventWorkspace):
                Integration(InputWorkspace='__bkg_tmp',
                            OutputWorkspace='__bkg_tmp',
                            EnableLogging=False)
            CopyInstrumentParameters(data, '__bkg_tmp', EnableLogging=False)
            ConvertSpectrumAxis(InputWorkspace='__bkg_tmp',
                                Target=target,
                                EFixed=eFixed,
                                OutputWorkspace='__bkg_tmp',
                                EnableLogging=False)
            Transpose(InputWorkspace='__bkg_tmp',
                      OutputWorkspace='__bkg_tmp',
                      EnableLogging=False)
            ResampleX(InputWorkspace='__bkg_tmp',
                      OutputWorkspace='__bkg_tmp',
                      XMin=xMin,
                      XMax=xMax,
                      NumberBins=numberBins,
                      EnableLogging=False)
            if cal is not None:
                Divide(LHSWorkspace='__bkg_tmp',
                       RHSWorkspace='__cal_tmp',
                       OutputWorkspace='__bkg_tmp',
                       EnableLogging=False)
            if normaliseBy == "Monitor":
                bkg_scale = bkg.run().getProtonCharge()
            elif normaliseBy == "Time":
                bkg_scale = bkg.run().getLogData('duration').value
            Scale(InputWorkspace='__bkg_tmp',
                  OutputWorkspace='__bkg_tmp',
                  Factor=cal_scale / bkg_scale,
                  EnableLogging=False)
            Scale(InputWorkspace='__bkg_tmp',
                  OutputWorkspace='__bkg_tmp',
                  Factor=self.getProperty('BackgroundScale').value,
                  EnableLogging=False)
            Minus(LHSWorkspace=outWS,
                  RHSWorkspace='__bkg_tmp',
                  OutputWorkspace=outWS,
                  EnableLogging=False)

        self.setProperty("OutputWorkspace", outWS)

        # remove temp workspaces
        for ws in self.temp_workspace_list:
            if mtd.doesExist(ws):
                DeleteWorkspace(ws, EnableLogging=False)
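This PyExec reads the property set of a WAND powder-reduction style algorithm; a hedged sketch of the corresponding simpleapi call, assuming the algorithm is registered as WANDPowderReduction and using only the property names that appear above:

from mantid.simpleapi import WANDPowderReduction  # registered name is an assumption

WANDPowderReduction(InputWorkspace='data_ws',
                    CalibrationWorkspace='cal_ws',
                    BackgroundWorkspace='bkg_ws',
                    Target='Theta',
                    NumberBins=1000,
                    NormaliseBy='Monitor',
                    OutputWorkspace='reduced')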
Example #24
    def _flux_normalization(self, w, target):
        """
        Divide data by integrated flux intensity

        Parameters
        ----------
        w: Mantid.EventWorkspace
            Input workspace
        target: str
            Specify the entity the workspace refers to. Valid options are
            'sample', 'background', and 'vanadium'

        Returns
        -------
        Mantid.EventWorkspace
        """
        valid_targets = ('sample', 'background', 'vanadium')
        if target not in valid_targets:
            raise KeyError('Target must be one of ' + ', '.join(valid_targets))
        w_nor = None
        if self._flux_normalization_type == 'Monitor':
            _t_flux = None
            _t_flux_name = tws('monitor_aggregate')
            target_to_runs = dict(sample='RunNumbers',
                                  background='BackgroundRuns',
                                  vanadium='VanadiumRuns')
            rl = self._run_list(self.getProperty(target_to_runs[target]).value)

            _t_w_name = tws('monitor')
            for run in rl:
                run_name = '{0}_{1}'.format(self._short_inst, str(run))
                _t_w = LoadNexusMonitors(run_name, OutputWorkspace=_t_w_name)
                if _t_flux is None:
                    _t_flux = CloneWorkspace(_t_w,
                                             OutputWorkspace=_t_flux_name)
                else:
                    _t_flux = Plus(_t_flux, _t_w, OutputWorkspace=_t_flux_name)

            _t_flux = ConvertUnits(_t_flux,
                                   Target='Wavelength',
                                   Emode='Elastic',
                                   OutputWorkspace=_t_flux_name)
            _t_flux = CropWorkspace(_t_flux,
                                    XMin=self._wavelength_band[0],
                                    XMax=self._wavelength_band[1],
                                    OutputWorkspace=_t_flux_name)
            _t_flux = OneMinusExponentialCor(_t_flux,
                                             C='0.20749999999999999',
                                             C1='0.001276',
                                             OutputWorkspace=_t_flux_name)
            _t_flux = Scale(_t_flux,
                            Factor='1e-06',
                            Operation='Multiply',
                            OutputWorkspace=_t_flux_name)
            _t_flux = Integration(_t_flux,
                                  RangeLower=self._wavelength_band[0],
                                  RangeUpper=self._wavelength_band[1],
                                  OutputWorkspace=_t_flux_name)
            w_nor = Divide(w, _t_flux, OutputWorkspace=w.name())
        else:
            aggregate_flux = None
            if self._flux_normalization_type == 'Proton Charge':
                aggregate_flux = w.getRun().getProtonCharge()
            elif self._flux_normalization_type == 'Duration':
                aggregate_flux = w.getRun().getProperty('duration').value
            w_nor = Scale(w,
                          Operation='Multiply',
                          Factor=1.0 / aggregate_flux,
                          OutputWorkspace=w.name())
        return w_nor
Example #25
from mantid.simpleapi import Load, Integration
import matplotlib.pyplot as plt
import numpy as np

run1 = 'CORELLI_48505'
run2 = 'CORELLI_48513'

ws1 = Load(run1)
ws2 = Load(run2)
ws1 = Integration(ws1)
ws2 = Integration(ws2)

ws1y = ws1.extractY().reshape((-1, 256))
ws2y = ws2.extractY().reshape((-1, 256))

ws1ysum = ws1y.sum(axis=1)
ws2ysum = ws2y.sum(axis=1)

plt.imshow(ws1y, vmax=1000)
plt.show()

plt.plot(ws1ysum)
plt.plot(ws2ysum)
plt.show()

w1 = ws1ysum[480:912]
w2 = ws2ysum[480:912]

plt.plot(w1)
plt.plot(w2)
plt.show()
Example #26
    def _resample_background(
        self,
        current_background,
        current_workspace,
        mask_name,
        x_min,
        x_max,
        resampled_calibration,
    ):
        """Resample the given background workspace."""
        cal = self.getProperty("CalibrationWorkspace").value
        target = self.getProperty("Target").value
        e_fixed = self.getProperty("EFixed").value
        number_bins = self.getProperty("NumberBins").value

        _ws_bkg = ExtractUnmaskedSpectra(
            InputWorkspace=current_background,
            MaskWorkspace=mask_name,
            EnableLogging=False,
        )

        if isinstance(mtd["_ws_bkg"], IEventWorkspace):
            _ws_bkg = Integration(InputWorkspace=_ws_bkg, EnableLogging=False)

        CopyInstrumentParameters(
            InputWorkspace=current_workspace,
            OutputWorkspace=_ws_bkg,
            EnableLogging=False,
        )

        _ws_bkg = ConvertSpectrumAxis(
            InputWorkspace=_ws_bkg, Target=target, EFixed=e_fixed, EnableLogging=False,
        )

        _ws_bkg = Transpose(InputWorkspace=_ws_bkg, EnableLogging=False)

        _ws_bkg_resampled = ResampleX(
            InputWorkspace=_ws_bkg,
            XMin=x_min,
            XMax=x_max,
            NumberBins=number_bins,
            EnableLogging=False,
        )

        if cal is not None:
            _ws_bkg_resampled = Divide(
                LHSWorkspace=_ws_bkg_resampled,
                RHSWorkspace=resampled_calibration,
                EnableLogging=False,
            )

        _ws_bkg_resampled = Scale(
            InputWorkspace=_ws_bkg_resampled,
            Factor=self._get_scale(cal) / self._get_scale(current_background),
            EnableLogging=False,
        )

        _ws_bkg_resampled = Scale(
            InputWorkspace=_ws_bkg_resampled,
            Factor=self.getProperty("BackgroundScale").value,
            EnableLogging=False,
        )

        return _ws_bkg_resampled