Example 1
    def _create_peaks_workspace(self):
        """Create a dummy peaks workspace"""
        path = FileFinder.getFullPath(
            "IDFs_for_UNIT_TESTING/MINITOPAZ_Definition.xml")
        inst = LoadEmptyInstrument(Filename=path)
        ws = CreatePeaksWorkspace(inst, 0)
        DeleteWorkspace(inst)
        SetUB(ws, 1, 1, 1, 90, 90, 90)

        # Add a bunch of random peaks that happen to fall on the
        # detector bank defined in the IDF
        center_q = np.array([-5.1302, 2.5651, 3.71809])
        qs = []
        for i in np.arange(0, 1, 0.1):
            for j in np.arange(-0.5, 0, 0.1):
                q = center_q.copy()
                q[1] += j
                q[2] += i
                qs.append(q)

        # Add the peaks to the PeaksWorkspace with dummy values for intensity,
        # Sigma, and HKL
        for q in qs:
            peak = ws.createPeak(q)
            peak.setIntensity(100)
            peak.setSigmaIntensity(10)
            peak.setHKL(1, 1, 1)
            ws.addPeak(peak)

        return ws
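
As a usage sketch (illustrative, not part of the example): the helper builds a 10 x 5 grid of q-vectors, so a test based on it might check the peak count and the dummy values like this, assuming Mantid's standard PeaksWorkspace API:

    def test_peaks_workspace(self):
        ws = self._create_peaks_workspace()
        # 10 values of i times 5 values of j from the np.arange grids above
        self.assertEqual(ws.getNumberPeaks(), 50)
        peak = ws.getPeak(0)
        self.assertEqual(peak.getIntensity(), 100)
        self.assertEqual(peak.getSigmaIntensity(), 10)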
Example 2
    def test_HFIRCalculateGoniometer_HB3A_phi(self):
        omega = np.deg2rad(42)
        chi = np.deg2rad(-3)
        phi = np.deg2rad(23)
        R1 = np.array([
            [np.cos(omega), 0, -np.sin(omega)],  # omega 0,1,0,-1
            [0, 1, 0],
            [np.sin(omega), 0, np.cos(omega)]
        ])
        R2 = np.array([
            [np.cos(chi), np.sin(chi), 0],  # chi 0,0,1,-1
            [-np.sin(chi), np.cos(chi), 0],
            [0, 0, 1]
        ])
        R3 = np.array([
            [np.cos(phi), 0, -np.sin(phi)],  # phi 0,1,0,-1
            [0, 1, 0],
            [np.sin(phi), 0, np.cos(phi)]
        ])
        R = np.dot(np.dot(R1, R2), R3)

        wl = 1.54
        k = 2 * np.pi / wl
        theta = np.deg2rad(47)
        phi = np.deg2rad(13)

        q_lab = np.array([
            -np.sin(theta) * np.cos(phi), -np.sin(theta) * np.sin(phi),
            1 - np.cos(theta)
        ]) * k

        q_sample = np.dot(np.linalg.inv(R), q_lab)

        peaks = CreatePeaksWorkspace(OutputType="LeanElasticPeak",
                                     NumberOfPeaks=0)
        AddSampleLog(peaks, "Wavelength", str(wl), "Number")
        SetGoniometer(peaks, Axis0='42,0,1,0,-1',
                      Axis1='-3,0,0,1,-1')  # don't set phi

        p = peaks.createPeakQSample(q_sample)
        peaks.addPeak(p)

        HFIRCalculateGoniometer(peaks,
                                OverrideProperty=True,
                                InnerGoniometer=True)

        g = Goniometer()
        g.setR(peaks.getPeak(0).getGoniometerMatrix())
        YZY = g.getEulerAngles('YZY')
        self.assertAlmostEqual(YZY[0], -42, delta=1e-10)  # omega
        self.assertAlmostEqual(YZY[1], 3, delta=1e-10)  # chi
        self.assertAlmostEqual(YZY[2], -23, delta=1e-1)  # phi

        self.assertAlmostEqual(peaks.getPeak(0).getWavelength(),
                               1.54,
                               delta=1e-10)
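
For context (a cross-check sketch, not part of the test): the hand-built product R1 @ R2 @ R3 above equals Ry(-omega) @ Rz(-chi) @ Ry(-phi), i.e. an intrinsic YZY rotation with angles (-42, 3, -23) degrees, which is exactly what the assertions on getEulerAngles('YZY') expect. scipy is an assumption here; the test itself does not use it.

    import numpy as np
    from scipy.spatial.transform import Rotation

    def ry(a):  # active right-handed rotation about y
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

    def rz(a):  # active right-handed rotation about z
        c, s = np.cos(a), np.sin(a)
        return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

    omega, chi, phi = np.deg2rad([42, -3, 23])
    R = ry(-omega) @ rz(-chi) @ ry(-phi)  # same matrix as R1 @ R2 @ R3
    assert np.allclose(
        R, Rotation.from_euler('YZY', [-42, 3, -23], degrees=True).as_matrix())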
Example 3
    def test_handles_inaccurate_goniometer(self):
        peaks1 = CreatePeaksWorkspace(InstrumentWorkspace=self.ws,
                                      NumberOfPeaks=0,
                                      OutputWorkspace="SXD_peaks3")
        peaks2 = CloneWorkspace(InputWorkspace=peaks1,
                                OutputWorkspace="SXD_peaks4")
        # set different gonio on each run
        rot = 5
        SetGoniometer(Workspace=peaks1, Axis0=f'{-rot},0,1,0,1')
        SetGoniometer(Workspace=peaks2, Axis0=f'{rot},0,1,0,1')
        # Add peaks at QLab corresponding to slightly different gonio rotations
        UB = np.diag([0.25, 0.25, 0.1])  # alatt = [4,4,10]
        for h in range(0, 3):
            for k in range(0, 3):
                hkl = np.array([h, k, 4])
                qlab = 2 * np.pi * np.matmul(
                    np.matmul(getR(-(rot + 1), [0, 1, 0]), UB), hkl)
                pk = peaks1.createPeak(qlab)
                peaks1.addPeak(pk)
                qlab = 2 * np.pi * np.matmul(
                    np.matmul(getR(rot + 1, [0, 1, 0]), UB), hkl)
                pk = peaks2.createPeak(qlab)
                peaks2.addPeak(pk)

        FindGlobalBMatrix(PeakWorkspaces=[peaks1, peaks2],
                          a=4.15,
                          b=3.95,
                          c=10,
                          alpha=88,
                          beta=88,
                          gamma=89,
                          Tolerance=0.15)

        # check lattice - shouldn't be affected by error in goniometer
        self.assert_lattice([peaks1, peaks2],
                            4.0,
                            4.0,
                            10.0,
                            90.0,
                            90.0,
                            90.0,
                            delta_latt=2e-2,
                            delta_angle=2.5e-1)
        self.assert_matrix([peaks1],
                           getBMatrix(peaks2),
                           getBMatrix,
                           delta=1e-10)  # should have same B matrix
        self.assert_matrix([peaks1, peaks2], np.eye(3), getUMatrix, delta=5e-2)
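
getR, getBMatrix and getUMatrix above are helpers from the surrounding test module. A minimal stand-in for getR, assuming it builds a rotation matrix for an angle in degrees about a given axis, could be:

    import numpy as np

    def getR(angle_deg, axis):
        # Rodrigues' rotation formula; hypothetical reimplementation,
        # not the test module's actual helper
        t = np.deg2rad(angle_deg)
        u = np.asarray(axis, dtype=float) / np.linalg.norm(axis)
        K = np.array([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]])
        return np.eye(3) + np.sin(t) * K + (1 - np.cos(t)) * (K @ K)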
Example 4
    def test_HFIRCalculateGoniometer_HB2C_omega(self):
        omega = np.deg2rad(42)

        R = np.array([[np.cos(omega), 0, np.sin(omega)], [0, 1, 0],
                      [-np.sin(omega), 0, np.cos(omega)]])

        wl = 1.54
        k = 2 * np.pi / wl
        theta = np.deg2rad(47)
        phi = np.deg2rad(13)

        q_lab = np.array([
            -np.sin(theta) * np.cos(phi), -np.sin(theta) * np.sin(phi),
            1 - np.cos(theta)
        ]) * k

        q_sample = np.dot(np.linalg.inv(R), q_lab)

        peaks = CreatePeaksWorkspace(OutputType="LeanElasticPeak",
                                     NumberOfPeaks=0)

        p = peaks.createPeakQSample(q_sample)
        peaks.addPeak(p)

        HFIRCalculateGoniometer(peaks, wl)

        g = Goniometer()
        g.setR(peaks.getPeak(0).getGoniometerMatrix())
        YZY = g.getEulerAngles('YZY')
        self.assertAlmostEqual(YZY[0], 42, delta=1e-10)  # omega
        self.assertAlmostEqual(YZY[1], 0, delta=1e-10)  # chi
        self.assertAlmostEqual(YZY[2], 0, delta=1e-10)  # phi

        self.assertAlmostEqual(peaks.getPeak(0).getWavelength(),
                               1.54,
                               delta=1e-10)
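
For reference (a hedged check, not part of the test): the q_lab expression follows Mantid's momentum-transfer convention Q = k_i - k_f with the incident beam along +z; writing k_f at scattering angle theta and azimuth phi reproduces the array used above.

    import numpy as np

    wl = 1.54
    k = 2 * np.pi / wl
    theta, phi = np.deg2rad(47), np.deg2rad(13)

    k_i = k * np.array([0.0, 0.0, 1.0])
    k_f = k * np.array([np.sin(theta) * np.cos(phi),
                        np.sin(theta) * np.sin(phi),
                        np.cos(theta)])
    assert np.allclose(k_i - k_f,
                       k * np.array([-np.sin(theta) * np.cos(phi),
                                     -np.sin(theta) * np.sin(phi),
                                     1 - np.cos(theta)]))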
Example 5
import numpy as np
from mantid.simpleapi import CreatePeaksWorkspace, HFIRCalculateGoniometer
from mantid.geometry import Goniometer

# R is the omega rotation this snippet later inverts (definition assumed
# to match the HB2C omega example above)
omega = np.deg2rad(42)
R = np.array([[np.cos(omega), 0, np.sin(omega)], [0, 1, 0],
              [-np.sin(omega), 0, np.cos(omega)]])

wl = 1.54
k = 2 * np.pi / wl
theta = np.deg2rad(47)
phi = np.deg2rad(13)

q_lab = np.array([
    -np.sin(theta) * np.cos(phi), -np.sin(theta) * np.sin(phi),
    1 - np.cos(theta)
]) * k

q_sample = np.dot(np.linalg.inv(R), q_lab)

peaks = CreatePeaksWorkspace(OutputType="LeanElasticPeak", NumberOfPeaks=0)

p = peaks.createPeakQSample(q_sample)
peaks.addPeak(p)

HFIRCalculateGoniometer(peaks, wl, OverrideProperty=True, InnerGoniometer=True)

g = Goniometer()
g.setR(peaks.getPeak(0).getGoniometerMatrix())
print(g.getEulerAngles('YZY'))
assert np.isclose(g.getEulerAngles('YZY')[0], 42)

chi = np.deg2rad(-3)
phi = np.deg2rad(23)

R1 = np.array([
    [np.cos(omega), 0, -np.sin(omega)],  # omega 0,1,0,-1
    [0, 1, 0],
    [np.sin(omega), 0, np.cos(omega)]
])
Example 6
    def PyExec(self):
        # create peaks workspace to store linked peaks
        linked_peaks = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        # create peaks table to store linked predicted peaks
        linked_peaks_predicted = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        for m in range(0, self._iterations):
            if m == 0:
                predictor = self._predicted_peaks
            if m > 0:
                predictor = linked_peaks_predicted

            qtol_var = self._qtol * self._qdecrement**m
            num_peaks_var = self._num_peaks + self._peak_increment * m

            # collect the q_lab and d-spacing values of the found peaks
            qlabs_observed = np.array(self._observed_peaks.column("QLab"))
            dspacings_observed = np.array(
                self._observed_peaks.column("DSpacing"))

            # sort the predicted peaks from largest to smallest dspacing
            qlabs_predicted = np.array(predictor.column("QLab"))
            dspacings_predicted = np.array(predictor.column("DSpacing"))

            # get the indexing list that sorts dspacing from largest to
            # smallest
            hkls = np.array([[p.getH(), p.getK(), p.getL()]
                             for p in predictor])
            idx = dspacings_predicted.argsort()[::-1]
            HKL_predicted = hkls[idx, :]

            # sort q, d and h, k, l by this indexing
            qlabs_predicted = qlabs_predicted[idx]
            dspacings_predicted = dspacings_predicted[idx]

            q_ordered = qlabs_predicted[:num_peaks_var]
            d_ordered = dspacings_predicted[:num_peaks_var]
            HKL_ordered = HKL_predicted[:num_peaks_var]

            # loop through the found peaks and compare q and d to each
            # predicted peak; if the q and d values of a found peak match
            # a predicted peak within tolerance, the found peak inherits
            # the HKL of the predicted peak
            for i in range(len(qlabs_observed)):
                qx_obs, qy_obs, qz_obs = qlabs_observed[i]
                q_obs = V3D(qx_obs, qy_obs, qz_obs)
                p_obs = linked_peaks.createPeak(q_obs)
                d_obs = dspacings_observed[i]

                for j in range(len(q_ordered)):
                    qx_pred, qy_pred, qz_pred = q_ordered[j]
                    d_pred = d_ordered[j]

                    if (abs(qx_obs - qx_pred) <= qtol_var
                            and abs(qy_obs - qy_pred) <= qtol_var
                            and abs(qz_obs - qz_pred) <= qtol_var
                            and abs(d_obs - d_pred) <= self._dtol):
                        h, k, l = HKL_ordered[j]
                        p_obs.setHKL(h, k, l)
                        linked_peaks.addPeak(p_obs)

            # Clean up peaks where H == K == L == 0
            linked_peaks = FilterPeaks(linked_peaks,
                                       FilterVariable="h^2+k^2+l^2",
                                       Operator="!=",
                                       FilterValue="0")

            # force UB on linked_peaks using known lattice parameters
            CalculateUMatrix(PeaksWorkspace=linked_peaks,
                             a=self._a,
                             b=self._b,
                             c=self._c,
                             alpha=self._alpha,
                             beta=self._beta,
                             gamma=self._gamma,
                             StoreInADS=False)

            # new linked predicted peaks
            linked_peaks_predicted = PredictPeaks(
                InputWorkspace=linked_peaks,
                WavelengthMin=self._wavelength_min,
                WavelengthMax=self._wavelength_max,
                MinDSpacing=self._min_dspacing,
                MaxDSpacing=self._max_dspacing,
                ReflectionCondition=self._reflection_condition,
                StoreInADS=False)

        # clean up
        self.setProperty("LinkedPeaks", linked_peaks)
        self.setProperty("LinkedPredictedPeaks", linked_peaks_predicted)
        if mtd.doesExist("linked_peaks"):
            DeleteWorkspace(linked_peaks)
        if mtd.doesExist("linked_peaks_predicted"):
            DeleteWorkspace(linked_peaks_predicted)
        if self._delete_ws:
            DeleteWorkspace(self._workspace)
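
The nested loop above is an O(N*M) box match in (qx, qy, qz, d). As an illustrative alternative (a sketch, not the algorithm's own code), the candidate pairs could be found at once with numpy broadcasting:

    import numpy as np

    def match_indices(q_obs, d_obs, q_pred, d_pred, qtol, dtol):
        # q_obs: (N, 3), q_pred: (M, 3); returns matching (obs, pred) index pairs
        dq_ok = (np.abs(q_obs[:, None, :] - q_pred[None, :, :]) <= qtol).all(axis=2)
        dd_ok = np.abs(d_obs[:, None] - d_pred[None, :]) <= dtol
        return np.nonzero(dq_ok & dd_ok)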
Example 7
    def PyExec(self):
        # create peaks workspace to store linked peaks
        linked_peaks = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        # create peaks table to store linked predicted peaks
        linked_peaks_predicted = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        for m in range(0, self._iterations):
            if m == 0:
                predictor = self._predicted_peaks
            if m > 0:
                predictor = linked_peaks_predicted

            qtol_var = self._qtol * self._qdecrement**m
            num_peaks_var = self._num_peaks + self._peak_increment * m

            # collect the q_lab and d-spacing values of the found peaks
            # (here columns 15 and 8 of the peaks table are QLab and
            # DSpacing, accessed by name in the previous example)
            qlabs_observed = np.array(self._observed_peaks.column(15))
            dspacings_observed = np.array(self._observed_peaks.column(8))

            # sort the predicted peaks from largest to smallest dspacing
            qlabs_predicted = np.array(predictor.column(15))
            dspacings_predicted = np.array(predictor.column(8))

            # get the indexing list that sorts dspacing from largest to
            # smallest
            hkls = np.array([[p['h'], p['k'], p['l']] for p in predictor])
            idx = dspacings_predicted.argsort()[::-1]
            HKL_predicted = hkls[idx, :]

            # sort q, d and h, k, l by this indexing
            qlabs_predicted = qlabs_predicted[idx]
            dspacings_predicted = dspacings_predicted[idx]

            q_ordered = qlabs_predicted[:num_peaks_var]
            d_ordered = dspacings_predicted[:num_peaks_var]
            HKL_ordered = HKL_predicted[:num_peaks_var]

            # loop through the found peaks and compare q and d to each
            # predicted peak; if the q and d values of a found peak match
            # a predicted peak within tolerance, the found peak inherits
            # the HKL of the predicted peak
            for i in range(len(qlabs_observed)):
                qx_obs, qy_obs, qz_obs = qlabs_observed[i]
                q_obs = V3D(qx_obs, qy_obs, qz_obs)
                p_obs = linked_peaks.createPeak(q_obs)
                d_obs = dspacings_observed[i]

                for j in range(len(q_ordered)):
                    qx_pred, qy_pred, qz_pred = q_ordered[j]
                    d_pred = d_ordered[j]

                    if (abs(qx_obs - qx_pred) <= qtol_var
                            and abs(qy_obs - qy_pred) <= qtol_var
                            and abs(qz_obs - qz_pred) <= qtol_var
                            and abs(d_obs - d_pred) <= self._dtol):
                        h, k, l = HKL_ordered[j]
                        p_obs.setHKL(h, k, l)
                        linked_peaks.addPeak(p_obs)

            # Clean up peaks where H == K == L == 0
            linked_peaks = FilterPeaks(linked_peaks,
                                       FilterVariable="h^2+k^2+l^2",
                                       Operator="!=",
                                       FilterValue="0")

            # force UB on linked_peaks using known lattice parameters
            CalculateUMatrix(PeaksWorkspace=linked_peaks,
                             a=self._a,
                             b=self._b,
                             c=self._c,
                             alpha=self._alpha,
                             beta=self._beta,
                             gamma=self._gamma,
                             StoreInADS=False)

            # new linked predicted peaks
            linked_peaks_predicted = PredictPeaks(
                InputWorkspace=linked_peaks,
                WavelengthMin=self._wavelength_min,
                WavelengthMax=self._wavelength_max,
                MinDSpacing=self._min_dspacing,
                MaxDSpacing=self._max_dspacing,
                ReflectionCondition=self._reflection_condition,
                StoreInADS=False)

        # clean up
        self.setProperty("LinkedPeaks", linked_peaks)
        self.setProperty("LinkedPredictedPeaks", linked_peaks_predicted)
        if mtd.doesExist("linked_peaks"):
            DeleteWorkspace(linked_peaks)
        if mtd.doesExist("linked_peaks_predicted"):
            DeleteWorkspace(linked_peaks_predicted)
        if self._delete_ws:
            DeleteWorkspace(self._workspace)
Example 8
    def PyExec(self):
        input_workspaces = self._expand_groups()
        outWS = self.getPropertyValue("OutputWorkspace")
        CreatePeaksWorkspace(OutputType='LeanElasticPeak',
                             InstrumentWorkspace=input_workspaces[0],
                             NumberOfPeaks=0,
                             OutputWorkspace=outWS,
                             EnableLogging=False)

        method = self.getProperty("Method").value
        n_bkgr_pts = self.getProperty("NumBackgroundPts").value
        n_fwhm = self.getProperty("WidthScale").value
        scale = self.getProperty("ScaleFactor").value
        chisqmax = self.getProperty("ChiSqMax").value
        signalNoiseMin = self.getProperty("SignalNoiseMin").value
        ll = self.getProperty("LowerLeft").value
        ur = self.getProperty("UpperRight").value
        startX = self.getProperty('StartX').value
        endX = self.getProperty('EndX').value
        use_lorentz = self.getProperty("ApplyLorentz").value
        optimize_q = self.getProperty("OptimizeQVector").value
        output_fit = self.getProperty("OutputFitResults").value

        if output_fit and method != "Counts":
            fit_results = WorkspaceGroup()
            AnalysisDataService.addOrReplace(outWS + "_fit_results",
                                             fit_results)

        for inWS in input_workspaces:
            tmp_inWS = '__tmp_' + inWS
            IntegrateMDHistoWorkspace(InputWorkspace=inWS,
                                      P1Bin=f'{ll[1]},{ur[1]}',
                                      P2Bin=f'{ll[0]},{ur[0]}',
                                      OutputWorkspace=tmp_inWS,
                                      EnableLogging=False)
            ConvertMDHistoToMatrixWorkspace(tmp_inWS,
                                            OutputWorkspace=tmp_inWS,
                                            EnableLogging=False)
            data = ConvertToPointData(tmp_inWS,
                                      OutputWorkspace=tmp_inWS,
                                      EnableLogging=False)

            run = mtd[inWS].getExperimentInfo(0).run()
            scan_log = 'omega' if np.isclose(run.getTimeAveragedStd('phi'),
                                             0.0) else 'phi'
            scan_axis = run[scan_log].value
            scan_step = (scan_axis[-1] - scan_axis[0]) / (scan_axis.size - 1)
            data.setX(0, scan_axis)

            y = data.extractY().flatten()
            x = data.extractX().flatten()

            __tmp_pw = CreatePeaksWorkspace(OutputType='LeanElasticPeak',
                                            InstrumentWorkspace=inWS,
                                            NumberOfPeaks=0,
                                            EnableLogging=False)

            if method != "Counts":
                # fit against gaussian with flat background for both the Fitted and CountsWithFitting methods
                fit_result = self._fit_gaussian(inWS, data, x, y, startX, endX,
                                                output_fit)

                if fit_result and fit_result.OutputStatus == 'success' and fit_result.OutputChi2overDoF < chisqmax:
                    B, A, peak_centre, sigma, _ = fit_result.OutputParameters.toDict(
                    )['Value']
                    _, errA, _, errs, _ = fit_result.OutputParameters.toDict(
                    )['Error']

                    if method == "Fitted":
                        integrated_intensity = A * sigma * np.sqrt(2 * np.pi)

                        # Convert correlation back into covariance
                        cor_As = (
                            fit_result.OutputNormalisedCovarianceMatrix.cell(
                                1, 4) / 100 *
                            fit_result.OutputParameters.cell(1, 2) *
                            fit_result.OutputParameters.cell(3, 2))
                        # σ^2 = 2π (A^2 σ_s^2 + σ_A^2 s^2 + 2 A s σ_As)
                        integrated_intensity_error = np.sqrt(
                            2 * np.pi * (A**2 * errs**2 + sigma**2 * errA**2 +
                                         2 * A * sigma * cor_As))

                    elif method == "CountsWithFitting":
                        y = y[slice(
                            np.searchsorted(
                                x, peak_centre - 2.3548 * sigma * n_fwhm / 2),
                            np.searchsorted(
                                x, peak_centre + 2.3548 * sigma * n_fwhm / 2))]
                        # subtract out the fitted flat background
                        integrated_intensity = (y.sum() -
                                                B * y.size) * scan_step
                        integrated_intensity_error = np.sum(
                            np.sqrt(y)) * scan_step

                    # update the goniometer position based on the fitted peak center
                    if scan_log == 'omega':
                        SetGoniometer(Workspace=__tmp_pw,
                                      Axis0=f'{peak_centre},0,1,0,-1',
                                      Axis1='chi,0,0,1,-1',
                                      Axis2='phi,0,1,0,-1',
                                      EnableLogging=False)
                    else:
                        SetGoniometer(Workspace=__tmp_pw,
                                      Axis0='omega,0,1,0,-1',
                                      Axis1='chi,0,0,1,-1',
                                      Axis2=f'{peak_centre},0,1,0,-1',
                                      EnableLogging=False)
                else:
                    self.log().warning(
                        "Failed to fit workspace {}: Output Status={}, ChiSq={}"
                        .format(inWS, fit_result.OutputStatus,
                                fit_result.OutputChi2overDoF))
                    self._delete_tmp_workspaces(str(__tmp_pw), tmp_inWS)
                    continue
            else:
                integrated_intensity, integrated_intensity_error = self._counts_integration(
                    data, n_bkgr_pts, scan_step)

                # set the goniometer position to use the average of the scan
                SetGoniometer(Workspace=__tmp_pw,
                              Axis0='omega,0,1,0,-1',
                              Axis1='chi,0,0,1,-1',
                              Axis2='phi,0,1,0,-1',
                              EnableLogging=False)

            integrated_intensity *= scale
            integrated_intensity_error *= scale

            peak = __tmp_pw.createPeakHKL([
                run['h'].getStatistics().median,
                run['k'].getStatistics().median,
                run['l'].getStatistics().median
            ])
            peak.setWavelength(float(run['wavelength'].value))
            peak.setIntensity(integrated_intensity)
            peak.setSigmaIntensity(integrated_intensity_error)

            if integrated_intensity / integrated_intensity_error > signalNoiseMin:
                __tmp_pw.addPeak(peak)

                # correct q-vector using CentroidPeaksMD
                if optimize_q:
                    __tmp_q_ws = HB3AAdjustSampleNorm(InputWorkspaces=inWS,
                                                      NormaliseBy='None',
                                                      EnableLogging=False)
                    __tmp_pw = CentroidPeaksMD(__tmp_q_ws,
                                               __tmp_pw,
                                               EnableLogging=False)
                    DeleteWorkspace(__tmp_q_ws, EnableLogging=False)

                if use_lorentz:
                    # ILL Neutron Data Booklet, Second Edition, Section 2.9, Part 4.1, Equation 7
                    peak = __tmp_pw.getPeak(0)
                    lorentz = abs(
                        np.sin(peak.getScattering() *
                               np.cos(peak.getAzimuthal())))
                    peak.setIntensity(peak.getIntensity() * lorentz)
                    peak.setSigmaIntensity(peak.getSigmaIntensity() * lorentz)

                CombinePeaksWorkspaces(outWS,
                                       __tmp_pw,
                                       OutputWorkspace=outWS,
                                       EnableLogging=False)

                if output_fit and method != "Counts":
                    fit_results.addWorkspace(
                        RenameWorkspace(tmp_inWS + '_Workspace',
                                        outWS + "_" + inWS + '_Workspace',
                                        EnableLogging=False))
                    fit_results.addWorkspace(
                        RenameWorkspace(tmp_inWS + '_Parameters',
                                        outWS + "_" + inWS + '_Parameters',
                                        EnableLogging=False))
                    fit_results.addWorkspace(
                        RenameWorkspace(
                            tmp_inWS + '_NormalisedCovarianceMatrix',
                            outWS + "_" + inWS + '_NormalisedCovarianceMatrix',
                            EnableLogging=False))
                    fit_results.addWorkspace(
                        IntegrateMDHistoWorkspace(
                            InputWorkspace=inWS,
                            P1Bin=f'{ll[1]},0,{ur[1]}',
                            P2Bin=f'{ll[0]},0,{ur[0]}',
                            P3Bin='0,{}'.format(
                                mtd[inWS].getDimension(2).getNBins()),
                            OutputWorkspace=outWS + "_" + inWS + "_ROI",
                            EnableLogging=False))
            else:
                self.log().warning(
                    "Skipping peak from {} because Signal/Noise={:.3f} which is less than {}"
                    .format(inWS,
                            integrated_intensity / integrated_intensity_error,
                            signalNoiseMin))

            self._delete_tmp_workspaces(str(__tmp_pw), tmp_inWS)

        self.setProperty("OutputWorkspace", mtd[outWS])
Example 9
    def PyExec(self):
        input_workspaces = self._expand_groups()
        outWS = self.getPropertyValue("OutputWorkspace")
        CreatePeaksWorkspace(OutputType='LeanElasticPeak',
                             InstrumentWorkspace=input_workspaces[0],
                             NumberOfPeaks=0,
                             OutputWorkspace=outWS,
                             EnableLogging=False)

        scale = self.getProperty("ScaleFactor").value
        chisqmax = self.getProperty("ChiSqMax").value
        signalNoiseMin = self.getProperty("SignalNoiseMin").value
        ll = self.getProperty("LowerLeft").value
        ur = self.getProperty("UpperRight").value
        startX = self.getProperty('StartX').value
        endX = self.getProperty('EndX').value
        use_lorentz = self.getProperty("ApplyLorentz").value
        optimize_q = self.getProperty("OptimizeQVector").value
        output_fit = self.getProperty("OutputFitResults").value

        if output_fit:
            fit_results = WorkspaceGroup()
            AnalysisDataService.addOrReplace(outWS + "_fit_results",
                                             fit_results)

        for inWS in input_workspaces:
            tmp_inWS = '__tmp_' + inWS
            IntegrateMDHistoWorkspace(InputWorkspace=inWS,
                                      P1Bin=f'{ll[1]},{ur[1]}',
                                      P2Bin=f'{ll[0]},{ur[0]}',
                                      OutputWorkspace=tmp_inWS,
                                      EnableLogging=False)
            ConvertMDHistoToMatrixWorkspace(tmp_inWS,
                                            OutputWorkspace=tmp_inWS,
                                            EnableLogging=False)
            data = ConvertToPointData(tmp_inWS,
                                      OutputWorkspace=tmp_inWS,
                                      EnableLogging=False)

            run = mtd[inWS].getExperimentInfo(0).run()
            scan_log = 'omega' if np.isclose(run.getTimeAveragedStd('phi'),
                                             0.0) else 'phi'
            scan_axis = run[scan_log].value
            data.setX(0, scan_axis)

            y = data.extractY().flatten()
            x = data.extractX().flatten()
            function = f"name=FlatBackground, A0={np.nanmin(y)};" \
                f"name=Gaussian, PeakCentre={x[np.nanargmax(y)]}, Height={np.nanmax(y)-np.nanmin(y)}, Sigma=0.25"
            constraints = f"f0.A0 > 0, f1.Height > 0, {x.min()} < f1.PeakCentre < {x.max()}"
            try:
                fit_result = Fit(function,
                                 data,
                                 Output=str(data),
                                 IgnoreInvalidData=True,
                                 OutputParametersOnly=not output_fit,
                                 Constraints=constraints,
                                 StartX=startX,
                                 EndX=endX,
                                 EnableLogging=False)
            except RuntimeError as e:
                self.log().warning("Failed to fit workspace {}: {}".format(
                    inWS, e))
                continue

            if fit_result.OutputStatus == 'success' and fit_result.OutputChi2overDoF < chisqmax:
                __tmp_pw = CreatePeaksWorkspace(OutputType='LeanElasticPeak',
                                                InstrumentWorkspace=inWS,
                                                NumberOfPeaks=0,
                                                EnableLogging=False)

                # note: x is rebound here from the scan-axis array to the
                # fitted peak centre (used in the SetGoniometer calls below)
                _, A, x, s, _ = fit_result.OutputParameters.toDict()['Value']
                _, errA, _, errs, _ = fit_result.OutputParameters.toDict(
                )['Error']

                if scan_log == 'omega':
                    SetGoniometer(Workspace=__tmp_pw,
                                  Axis0=f'{x},0,1,0,-1',
                                  Axis1='chi,0,0,1,-1',
                                  Axis2='phi,0,1,0,-1',
                                  EnableLogging=False)
                else:
                    SetGoniometer(Workspace=__tmp_pw,
                                  Axis0='omega,0,1,0,-1',
                                  Axis1='chi,0,0,1,-1',
                                  Axis2=f'{x},0,1,0,-1',
                                  EnableLogging=False)

                peak = __tmp_pw.createPeakHKL([
                    run['h'].getStatistics().median,
                    run['k'].getStatistics().median,
                    run['l'].getStatistics().median
                ])
                peak.setWavelength(float(run['wavelength'].value))

                integrated_intensity = A * s * np.sqrt(2 * np.pi) * scale
                peak.setIntensity(integrated_intensity)

                # Convert correlation back into covariance
                cor_As = (
                    fit_result.OutputNormalisedCovarianceMatrix.cell(1, 4) /
                    100 * fit_result.OutputParameters.cell(1, 2) *
                    fit_result.OutputParameters.cell(3, 2))
                # σ^2 = 2π (A^2 σ_s^2 + σ_A^2 s^2 + 2 A s σ_As)
                integrated_intensity_error = np.sqrt(
                    2 * np.pi * (A**2 * errs**2 + s**2 * errA**2 +
                                 2 * A * s * cor_As)) * scale
                peak.setSigmaIntensity(integrated_intensity_error)

                if integrated_intensity / integrated_intensity_error > signalNoiseMin:
                    __tmp_pw.addPeak(peak)

                    # correct q-vector using CentroidPeaksMD
                    if optimize_q:
                        __tmp_q_ws = HB3AAdjustSampleNorm(InputWorkspaces=inWS,
                                                          NormaliseBy='None',
                                                          EnableLogging=False)
                        __tmp_pw = CentroidPeaksMD(__tmp_q_ws,
                                                   __tmp_pw,
                                                   EnableLogging=False)
                        DeleteWorkspace(__tmp_q_ws, EnableLogging=False)

                    if use_lorentz:
                        # ILL Neutron Data Booklet, Second Edition, Section 2.9, Part 4.1, Equation 7
                        peak = __tmp_pw.getPeak(0)
                        lorentz = abs(
                            np.sin(peak.getScattering() *
                                   np.cos(peak.getAzimuthal())))
                        peak.setIntensity(peak.getIntensity() * lorentz)
                        peak.setSigmaIntensity(peak.getSigmaIntensity() *
                                               lorentz)

                    CombinePeaksWorkspaces(outWS,
                                           __tmp_pw,
                                           OutputWorkspace=outWS,
                                           EnableLogging=False)
                    DeleteWorkspace(__tmp_pw, EnableLogging=False)

                    if output_fit:
                        fit_results.addWorkspace(
                            RenameWorkspace(tmp_inWS + '_Workspace',
                                            outWS + "_" + inWS + '_Workspace',
                                            EnableLogging=False))
                        fit_results.addWorkspace(
                            RenameWorkspace(tmp_inWS + '_Parameters',
                                            outWS + "_" + inWS + '_Parameters',
                                            EnableLogging=False))
                        fit_results.addWorkspace(
                            RenameWorkspace(tmp_inWS +
                                            '_NormalisedCovarianceMatrix',
                                            outWS + "_" + inWS +
                                            '_NormalisedCovarianceMatrix',
                                            EnableLogging=False))
                        fit_results.addWorkspace(
                            IntegrateMDHistoWorkspace(
                                InputWorkspace=inWS,
                                P1Bin=f'{ll[1]},0,{ur[1]}',
                                P2Bin=f'{ll[0]},0,{ur[0]}',
                                P3Bin='0,{}'.format(
                                    mtd[inWS].getDimension(2).getNBins()),
                                OutputWorkspace=outWS + "_" + inWS + "_ROI",
                                EnableLogging=False))
                else:
                    self.log().warning(
                        "Skipping peak from {} because Signal/Noise={:.3f} which is less than {}"
                        .format(
                            inWS,
                            integrated_intensity / integrated_intensity_error,
                            signalNoiseMin))
            else:
                self.log().warning(
                    "Failed to fit workspace {}: Output Status={}, ChiSq={}".
                    format(inWS, fit_result.OutputStatus,
                           fit_result.OutputChi2overDoF))

            for tmp_ws in (tmp_inWS, tmp_inWS + '_Workspace',
                           tmp_inWS + '_Parameters',
                           tmp_inWS + '_NormalisedCovarianceMatrix'):
                if mtd.doesExist(tmp_ws):
                    DeleteWorkspace(tmp_ws, EnableLogging=False)

        self.setProperty("OutputWorkspace", mtd[outWS])