def runTest(self):
     # Load raw data (bank 1)
     wsMD = LoadMD(
         "WISH38237_MD.nxs")  # default so doesn't get overwrite van
     # For each mod vec, predict and integrate peaks and combine
     qs = [(0.15, 0, 0.3), (-0.15, 0, 0.3)]
     all_pks = CreatePeaksWorkspace(InstrumentWorkspace=wsMD,
                                    NumberOfPeaks=0,
                                    OutputWorkspace="all_pks")
     LoadIsawUB(InputWorkspace=all_pks,
                Filename='Wish_Diffuse_Scattering_ISAW_UB.mat')
     # PredictPeaks
     parent = PredictPeaks(InputWorkspace=all_pks,
                           WavelengthMin=0.8,
                           WavelengthMax=9.3,
                           MinDSpacing=0.5,
                           ReflectionCondition="Primitive")
     self._pfps = []
     self._saved_files = []
     for iq, q in enumerate(qs):
         wsname = f'pfp_{iq}'
         PredictFractionalPeaks(Peaks=parent,
                                IncludeAllPeaksInRange=True,
                                Hmin=0,
                                Hmax=0,
                                Kmin=1,
                                Kmax=1,
                                Lmin=0,
                                Lmax=1,
                                ReflectionCondition='Primitive',
                                MaxOrder=1,
                                ModVector1=",".join([str(qi) for qi in q]),
                                FracPeaks=wsname)
         FilterPeaks(InputWorkspace=wsname,
                     OutputWorkspace=wsname,
                     FilterVariable='Wavelength',
                     FilterValue=9.3,
                     Operator='<')  # should get rid of one peak in q1 table
         FilterPeaks(InputWorkspace=wsname,
                     OutputWorkspace=wsname,
                     FilterVariable='Wavelength',
                     FilterValue=0.8,
                     Operator='>')
         IntegratePeaksMD(InputWorkspace=wsMD,
                          PeakRadius='0.1',
                          BackgroundInnerRadius='0.1',
                          BackgroundOuterRadius='0.15',
                          PeaksWorkspace=wsname,
                          OutputWorkspace=wsname,
                          IntegrateIfOnEdge=False,
                          UseOnePercentBackgroundCorrection=False)
         all_pks = CombinePeaksWorkspaces(LHSWorkspace=all_pks,
                                          RHSWorkspace=wsname)
         self._pfps.append(ADS.retrieve(wsname))
     self._filepath = os.path.join(config['defaultsave.directory'],
                                   'WISH_IntegratedSatellite.int')
     SaveReflections(InputWorkspace=all_pks,
                     Filename=self._filepath,
                     Format='Jana')
     self._all_pks = all_pks
Example #2
def SCDCalibratePanels2DiagnosticsPlot(
    peaksWorkspace: Union[PeaksWorkspace, str],
    banknames: Union[str, List[str]] = None,
    config: Dict[str, str] = {
        "prefix": "fig",
        "type": "png",
        "saveto": ".",
        "mode": "modern",
    },
    showPlots: bool = True,
) -> None:
    """
    Generate diagnostic plots from SCDCalibratePanels2

    @param peaksWorkspace: peaks workspace with calibrated instrument applied
    @param banknames: bank(s) for diagnostics
    @param config: plot configuration dictionary
            type: ["png", "pdf", "jpeg"]
            mode: ["modern", "legacy"]
    @param showPlots: open diagnostics plots after generating them.

    @returns: None
    """
    # parse input
    pws = mtd[peaksWorkspace] if isinstance(peaksWorkspace,
                                            str) else peaksWorkspace
    logging.info(f"Start diagnostics with {peaksWorkspace.name()}.")

    # process all banks if banknames is None
    if banknames is None:
        banknames = set(
            [pws.row(i)["BankName"] for i in range(pws.getNumberPeaks())])
    elif isinstance(banknames, str):
        banknames = [me.strip() for me in banknames.split(",")]
    else:
        pass

    # one bank at a time
    for bn in banknames:
        logging.info(f"--processing bank: {bn}")
        pws_filtered = FilterPeaks(
            InputWorkspace=pws,
            FilterVariable='h^2+k^2+l^2',
            FilterValue=0,
            Operator='>',
            BankName=bn,
        )
        # generate the plot
        figname = f"{config['prefix']}_{bn}.{config['type']}"
        SCDCalibratePanels2DiagnosticsPlotBank(
            filteredPeaksWS=pws_filtered,
            figname=figname,
            savedir=config["saveto"],
            showPlots=showPlots,
            mode=config["mode"],
        )

    # close backend handle
    if not showPlots:
        plt.close("all")
    def runTest(self):
        ws = LoadRaw(Filename='WISH00038237.raw', OutputWorkspace='38237')
        ws = ConvertUnits(ws, 'dSpacing', OutputWorkspace='38237')
        UB = np.array([[-0.00601763,  0.07397297,  0.05865706],
                       [ 0.05373321,  0.050198,   -0.05651455],
                       [-0.07822144,  0.0295911,  -0.04489172]])

        SetUB(ws, UB=UB)

        self._peaks = PredictPeaks(ws, WavelengthMin=0.1, WavelengthMax=100,
                                   OutputWorkspace='peaks')
        # We specifically want to check peak -5 -1 -7 exists, so filter for it
        self._filtered = FilterPeaks(self._peaks, "h^2+k^2+l^2", 75, '=',
                                     OutputWorkspace='filtered')

        SaveIsawPeaks(self._peaks, Filename='WISHSXReductionPeaksTest.peaks')
class WISHSingleCrystalPeakPredictionTest(MantidSystemTest):
    """
    At the time of writing WISH users rely quite heavily on the PredictPeaks
    algorithm. As WISH has tubes rather than rectangular detectors sometimes
    peaks fall between the gaps in the tubes.

    Here we check that PredictPeaks works on a real WISH dataset & UB. This also
    includes an example of a peak whose center is predicted to fall between two
    tubes.
    """
    def requiredFiles(self):
        return ["WISHPredictedSingleCrystalPeaks.nxs"]

    def cleanup(self):
        ADS.clear()
        try:
            os.remove(self._peaks_file)
        except:
            pass

    def runTest(self):
        ws = LoadEmptyInstrument(InstrumentName='WISH')
        UB = np.array([[-0.00601763, 0.07397297, 0.05865706],
                       [0.05373321, 0.050198, -0.05651455],
                       [-0.07822144, 0.0295911, -0.04489172]])

        SetUB(ws, UB=UB)

        self._peaks = PredictPeaks(ws,
                                   WavelengthMin=0.1,
                                   WavelengthMax=100,
                                   OutputWorkspace='peaks')
        # We specifically want to check peak -5 -1 -7 exists, so filter for it
        self._filtered = FilterPeaks(self._peaks,
                                     "h^2+k^2+l^2",
                                     75,
                                     '=',
                                     OutputWorkspace='filtered')

        SaveIsawPeaks(self._peaks, Filename='WISHSXReductionPeaksTest.peaks')

    def validate(self):
        self.assertEqual(self._peaks.rowCount(), 527)
        self.assertEqual(self._filtered.rowCount(), 7)

        # The peak at [-5 -1 -7] is known to fall between the gaps of WISH's tubes
        # Specifically check this one is predicted to exist because past bugs have
        # been found in the ray tracing.
        BasicPeak = namedtuple('Peak', ('DetID', 'BankName', 'h', 'k', 'l'))
        expected = BasicPeak(DetID=9202086,
                             BankName='WISHpanel09',
                             h=-5.0,
                             k=-1.0,
                             l=-7.0)
        expected_peak_found = False
        peak_count = self._filtered.rowCount()
        # iterate over the table representation of the PeaksWorkspace
        for i in range(peak_count):
            peak_row = self._filtered.row(i)
            peak = BasicPeak(**{k: peak_row[k] for k in BasicPeak._fields})
            if peak == expected:
                expected_peak_found = True
                break
        self.assertTrue(
            expected_peak_found,
            msg="Peak at {} expected but it was not found".format(expected))
        self._peaks_file = os.path.join(config['defaultsave.directory'],
                                        'WISHSXReductionPeaksTest.peaks')
        self.assertTrue(os.path.isfile(self._peaks_file))

        return self._peaks.name(), "WISHPredictedSingleCrystalPeaks.nxs"
Example #6
    def PyExec(self):
        # create peaks workspace to store linked peaks
        linked_peaks = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        # create peaks table to store linked predicted peaks
        linked_peaks_predicted = CreatePeaksWorkspace(
            InstrumentWorkspace=self._workspace,
            NumberOfPeaks=0,
            StoreInADS=False)

        for m in range(0, self._iterations):
            if m == 0:
                predictor = self._predicted_peaks
            if m > 0:
                predictor = linked_peaks_predicted

            qtol_var = self._qtol * self._qdecrement**m
            num_peaks_var = self._num_peaks + self._peak_increment * m

            # add the q_lab and d-spacing values of the found peaks to arrays
            qlabs_observed = np.array(self._observed_peaks.column("QLab"))
            dspacings_observed = np.array(
                self._observed_peaks.column("DSpacing"))

            # sort the predicted peaks from largest to smallest dspacing
            qlabs_predicted = np.array(predictor.column("QLab"))
            dspacings_predicted = np.array(predictor.column("DSpacing"))

            # get the indexing list that sorts dspacing from largest to
            # smallest
            hkls = np.array([[p.getH(), p.getK(), p.getL()]
                             for p in predictor])
            idx = dspacings_predicted.argsort()[::-1]
            HKL_predicted = hkls[idx, :]

            # sort q, d and h, k, l by this indexing
            qlabs_predicted = qlabs_predicted[idx]
            dspacings_predicted = dspacings_predicted[idx]

            q_ordered = qlabs_predicted[:num_peaks_var]
            d_ordered = dspacings_predicted[:num_peaks_var]
            HKL_ordered = HKL_predicted[:num_peaks_var]

            # loop through the ordered find peaks, compare q and d to each
            # predicted peak if the q and d values of a found peak match a
            # predicted peak within tolerance, the found peak inherits
            # the HKL of the predicted peak
            for i in range(len(qlabs_observed)):
                qx_obs, qy_obs, qz_obs = qlabs_observed[i]
                q_obs = V3D(qx_obs, qy_obs, qz_obs)
                p_obs = linked_peaks.createPeak(q_obs)
                d_obs = dspacings_observed[i]

                for j in range(len(q_ordered)):
                    qx_pred, qy_pred, qz_pred = q_ordered[j]
                    d_pred = d_ordered[j]

                    if (qx_pred - qtol_var <= qx_obs <= qx_pred + qtol_var and
                            qy_pred - qtol_var <= qy_obs <= qy_pred + qtol_var
                            and
                            qz_pred - qtol_var <= qz_obs <= qz_pred + qtol_var
                            and d_pred - self._dtol <= d_obs <=
                            d_pred + self._dtol):
                        h, k, l = HKL_ordered[j]
                        p_obs.setHKL(h, k, l)
                        linked_peaks.addPeak(p_obs)

            # Clean up peaks where H == K == L == 0
            linked_peaks = FilterPeaks(linked_peaks,
                                       FilterVariable="h^2+k^2+l^2",
                                       Operator="!=",
                                       FilterValue="0")

            # force UB on linked_peaks using known lattice parameters
            CalculateUMatrix(PeaksWorkspace=linked_peaks,
                             a=self._a,
                             b=self._b,
                             c=self._c,
                             alpha=self._alpha,
                             beta=self._beta,
                             gamma=self._gamma,
                             StoreInADS=False)

            # new linked predicted peaks
            linked_peaks_predicted = PredictPeaks(
                InputWorkspace=linked_peaks,
                WavelengthMin=self._wavelength_min,
                WavelengthMax=self._wavelength_max,
                MinDSpacing=self._min_dspacing,
                MaxDSpacing=self._max_dspacing,
                ReflectionCondition=self._reflection_condition,
                StoreInADS=False)

        # clean up
        self.setProperty("LinkedPeaks", linked_peaks)
        self.setProperty("LinkedPredictedPeaks", linked_peaks_predicted)
        if mtd.doesExist("linked_peaks"):
            DeleteWorkspace(linked_peaks)
        if mtd.doesExist("linked_peaks_predicted"):
            DeleteWorkspace(linked_peaks_predicted)
        if self._delete_ws:
            DeleteWorkspace(self._workspace)
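
The nested loop in the PyExec above links an observed peak to a predicted peak when every component of Q_lab and the d-spacing agree within tolerance, and the observed peak then inherits the predicted HKL. A standalone sketch of that matching rule on plain NumPy arrays, assuming nothing beyond the loop's logic (the function and variable names here are illustrative):

import numpy as np

def link_peaks_by_q_and_d(q_obs, d_obs, q_pred, d_pred, hkl_pred, qtol, dtol):
    """Return (observed index, (h, k, l)) pairs where an observed peak matches a
    predicted peak component-wise in Q_lab and in d-spacing."""
    links = []
    for i, (qo, do) in enumerate(zip(np.asarray(q_obs), np.asarray(d_obs))):
        for qp, dp, hkl in zip(np.asarray(q_pred), np.asarray(d_pred), hkl_pred):
            if np.all(np.abs(qo - qp) <= qtol) and abs(do - dp) <= dtol:
                links.append((i, tuple(hkl)))
                break  # stop at the first match; the loop above keeps scanning all predictions
    return links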
class WISHSingleCrystalPeakPredictionTest(MantidSystemTest):
    """
    At the time of writing WISH users rely quite heavily on the PredictPeaks
    algorithm. As WISH has tubes rather than rectangular detectors sometimes
    peaks fall between the gaps in the tubes.

    Here we check that PredictPeaks works on a real WISH dataset & UB. This also
    includes an example of a peak whose center is predicted to fall between two
    tubes.
    """
    def requiredFiles(self):
        return ["WISH00038237.raw", "WISHPredictedSingleCrystalPeaks.nxs"]

    def requiredMemoryMB(self):
        # Need lots of memory for full WISH dataset
        return 24000

    def cleanup(self):
        try:
            os.remove(self._peaks_file)
        except:
            pass

    def runTest(self):
        ws = LoadRaw(Filename='WISH00038237.raw', OutputWorkspace='38237')
        ws = ConvertUnits(ws, 'dSpacing', OutputWorkspace='38237')
        UB = np.array([[-0.00601763, 0.07397297, 0.05865706],
                       [0.05373321, 0.050198, -0.05651455],
                       [-0.07822144, 0.0295911, -0.04489172]])

        SetUB(ws, UB=UB)

        self._peaks = PredictPeaks(ws,
                                   WavelengthMin=0.1,
                                   WavelengthMax=100,
                                   OutputWorkspace='peaks')
        # We specifically want to check peak -5 -1 -7 exists, so filter for it
        self._filtered = FilterPeaks(self._peaks,
                                     "h^2+k^2+l^2",
                                     75,
                                     '=',
                                     OutputWorkspace='filtered')

        SaveIsawPeaks(self._peaks, Filename='WISHSXReductionPeaksTest.peaks')

    def validate(self):
        self.assertEqual(self._peaks.rowCount(), 510)
        self.assertEqual(self._filtered.rowCount(), 6)

        # The peak at [-5 -1 -7] is known to fall between the gaps of WISH's tubes
        # Specifically check this one is predicted to exist because past bugs have
        # been found in the ray tracing.
        BasicPeak = namedtuple('Peak', ('DetID', 'BankName', 'h', 'k', 'l'))
        expected = BasicPeak(DetID=9202086,
                             BankName='WISHpanel09',
                             h=-5.0,
                             k=-1.0,
                             l=-7.0)
        expected_peak_found = False
        for full_peak in self._filtered:
            peak = BasicPeak(DetID=full_peak.getDetectorID(),
                             BankName=full_peak.getBankName(),
                             h=full_peak.getH(),
                             k=full_peak.getK(),
                             l=full_peak.getL())
            if peak == expected:
                expected_peak_found = True
                break
        #endfor
        self.assertTrue(
            expected_peak_found,
            msg="Peak at {} expected but it was not found".format(expected))
        self._peaks_file = os.path.join(config['defaultsave.directory'],
                                        'WISHSXReductionPeaksTest.peaks')
        self.assertTrue(os.path.isfile(self._peaks_file))

        return self._peaks.name(), "WISHPredictedSingleCrystalPeaks.nxs"
Example #8
    def PyExec(self):
        input_workspaces, peak_workspaces = self._expand_groups()
        output_workspace_name = self.getPropertyValue("OutputWorkspace")

        peak_radius = self.getProperty("PeakRadius").value
        inner_radius = self.getProperty("BackgroundInnerRadius").value
        outer_radius = self.getProperty("BackgroundOuterRadius").value

        remove_0_intensity = self.getProperty("RemoveZeroIntensity").value
        use_lorentz = self.getProperty("ApplyLorentz").value

        multi_ws = len(input_workspaces) > 1

        output_workspaces = []

        for input_ws, peak_ws in zip(input_workspaces, peak_workspaces):
            if multi_ws:
                peaks_ws_name = input_ws + '_' + output_workspace_name
                output_workspaces.append(peaks_ws_name)
            else:
                peaks_ws_name = output_workspace_name

            IntegratePeaksMD(InputWorkspace=input_ws,
                             PeakRadius=peak_radius,
                             BackgroundInnerRadius=inner_radius,
                             BackgroundOuterRadius=outer_radius,
                             PeaksWorkspace=peak_ws,
                             OutputWorkspace=peaks_ws_name)

        if multi_ws:
            peaks_ws_name = output_workspace_name
            CreatePeaksWorkspace(
                InstrumentWorkspace=input_workspaces[0],
                NumberOfPeaks=0,
                OutputWorkspace=peaks_ws_name,
                OutputType=mtd[peak_workspaces[0]].id().replace(
                    'sWorkspace', ''))
            CopySample(InputWorkspace=output_workspaces[0],
                       OutputWorkspace=peaks_ws_name,
                       CopyName=False,
                       CopyMaterial=False,
                       CopyEnvironment=False,
                       CopyShape=False,
                       CopyLattice=True)
            for peak_ws in output_workspaces:
                CombinePeaksWorkspaces(peaks_ws_name,
                                       peak_ws,
                                       OutputWorkspace=peaks_ws_name)
                DeleteWorkspace(peak_ws)

        if use_lorentz:
            # Apply Lorentz correction:
            peaks = AnalysisDataService[peaks_ws_name]
            for p in range(peaks.getNumberPeaks()):
                peak = peaks.getPeak(p)
                lorentz = abs(
                    np.sin(peak.getScattering()) * np.cos(peak.getAzimuthal()))
                peak.setIntensity(peak.getIntensity() * lorentz)

        if remove_0_intensity:
            FilterPeaks(InputWorkspace=peaks_ws_name,
                        OutputWorkspace=peaks_ws_name,
                        FilterVariable='Intensity',
                        FilterValue=0,
                        Operator='>')

        # Write output only if a file path was provided
        if not self.getProperty("OutputFile").isDefault:
            out_format = self.getProperty("OutputFormat").value
            filename = self.getProperty("OutputFile").value

            if out_format == "SHELX":
                SaveHKL(InputWorkspace=peaks_ws_name,
                        Filename=filename,
                        DirectionCosines=True,
                        OutputWorkspace="__tmp")
                DeleteWorkspace("__tmp")
            elif out_format == "Fullprof":
                SaveReflections(InputWorkspace=peaks_ws_name,
                                Filename=filename,
                                Format="Fullprof")
            else:
                # This shouldn't happen
                raise RuntimeError("Invalid output format given")

        self.setProperty("OutputWorkspace", AnalysisDataService[peaks_ws_name])
class WISHSingleCrystalPeakPredictionTest(MantidSystemTest):
    """
    At the time of writing WISH users rely quite heavily on the PredictPeaks
    algorithm. As WISH has tubes rather than rectangular detectors sometimes
    peaks fall between the gaps in the tubes.

    Here we check that PredictPeaks works on a real WISH dataset & UB. This also
    includes an example of a peak whose center is predicted to fall between two
    tubes.
    """

    def requiredFiles(self):
        return ["WISH00038237.raw", "WISHPredictedSingleCrystalPeaks.nxs"]

    def requiredMemoryMB(self):
        # Need lots of memory for full WISH dataset
        return 16000

    def cleanup(self):
        try:
            os.remove(self._peaks_file)
        except:
            pass

    def runTest(self):
        ws = LoadRaw(Filename='WISH00038237.raw', OutputWorkspace='38237')
        ws = ConvertUnits(ws, 'dSpacing', OutputWorkspace='38237')
        UB = np.array([[-0.00601763,  0.07397297,  0.05865706],
                       [ 0.05373321,  0.050198,   -0.05651455],
                       [-0.07822144,  0.0295911,  -0.04489172]])

        SetUB(ws, UB=UB)

        self._peaks = PredictPeaks(ws, WavelengthMin=0.1, WavelengthMax=100,
                                   OutputWorkspace='peaks')
        # We specifically want to check peak -5 -1 -7 exists, so filter for it
        self._filtered = FilterPeaks(self._peaks, "h^2+k^2+l^2", 75, '=',
                                     OutputWorkspace='filtered')

        SaveIsawPeaks(self._peaks, Filename='WISHSXReductionPeaksTest.peaks')

    def validate(self):
        self.assertEqual(self._peaks.rowCount(), 510)
        self.assertEqual(self._filtered.rowCount(), 6)

        # The peak at [-5 -1 -7] is known to fall between the gaps of WISH's tubes
        # Specifically check this one is predicted to exist because past bugs have
        # been found in the ray tracing.
        Peak = namedtuple('Peak', ('DetID', 'BankName', 'h', 'k', 'l'))
        expected = Peak(DetID=9202086, BankName='WISHpanel09', h=-5.0, k=-1.0, l=-7.0)
        expected_peak_found = False
        for i in range(self._filtered.rowCount()):
            row = self._filtered.row(i)
            peak = Peak(DetID=row['DetID'], BankName=row['BankName'],
                        h=row['h'], k=row['k'], l=row['l'])
            if peak == expected:
                expected_peak_found = True
                break
        #endfor
        self.assertTrue(expected_peak_found, msg="Peak at {} expected but it was not found".format(expected))
        self._peaks_file = os.path.join(config['defaultsave.directory'], 'WISHSXReductionPeaksTest.peaks')
        self.assertTrue(os.path.isfile(self._peaks_file))

        return self._peaks.name(), "WISHPredictedSingleCrystalPeaks.nxs"