Code example #1
0
def load_bragg_by_filename(file_name):
    """
    Load a Bragg diffraction file (3-column data file, GSAS file or processed
    NeXus file) for Rietveld analysis.

    Parameters
    ----------
    file_name : str
        path to the Bragg file; the type is inferred from the extension
        (.gss/.gsa/.gda -> GSAS, .nxs -> processed NeXus, .dat -> ASCII)

    Returns
    -------
    tuple
        (output workspace name, list of bank angles)

    Raises
    ------
    RuntimeError
        if the file extension is not supported or loading failed
    """
    base_file_name = os.path.basename(file_name).lower()
    # the workspace is named after the file without its extension
    gss_ws_name = os.path.basename(file_name).split('.')[0]

    # dispatch on the file extension
    if base_file_name.endswith(('.gss', '.gsa', '.gda')):
        simpleapi.LoadGSS(Filename=file_name, OutputWorkspace=gss_ws_name)
    elif base_file_name.endswith('.nxs'):
        simpleapi.LoadNexusProcessed(Filename=file_name,
                                     OutputWorkspace=gss_ws_name)
        # Rietveld refinement is done in time-of-flight
        simpleapi.ConvertUnits(InputWorkspace=gss_ws_name,
                               OutputWorkspace=gss_ws_name,
                               EMode='Elastic',
                               Target='TOF')
    elif base_file_name.endswith('.dat'):
        simpleapi.LoadAscii(Filename=file_name,
                            OutputWorkspace=gss_ws_name,
                            Unit='TOF')
    else:
        raise RuntimeError('File %s is not of a supported type.' % file_name)

    # explicit check instead of a bare assert: asserts are stripped under -O
    if not AnalysisDataService.doesExist(gss_ws_name):
        raise RuntimeError('Failed to load %s into workspace %s.'
                           % (file_name, gss_ws_name))
    angle_list = addie.utilities.workspaces.calculate_bank_angle(gss_ws_name)

    return gss_ws_name, angle_list
Code example #2
0
File: addiedriver.py  Project: Kvieta1990/addie
    def load_sq(self, file_name):
        """
        Load an S(Q) file (processed NeXus or ASCII .dat/.txt) into a mantid
        workspace in MomentumTransfer units.

        On success the workspace name is stored in ``self._currSqWsName`` and
        registered in ``self._sqIndexDict``.

        Parameters
        ----------
        file_name : str
            name of the S(Q) file

        Returns
        -------
        tuple
            (workspace name, Q_min, Q_max); ("InvalidInput", 0, 0) when an
            ASCII file cannot be parsed

        Raises
        ------
        RuntimeError
            if the file could not be loaded into a workspace
        """
        # the workspace is named after the file without its extension
        sq_ws_name = os.path.basename(file_name).split('.')[0]

        # dispatch on the (upper-cased) file extension
        ext = file_name.upper().split('.')[-1]
        if ext == 'NXS':
            simpleapi.LoadNexusProcessed(Filename=file_name,
                                         OutputWorkspace=sq_ws_name)
            simpleapi.ConvertUnits(InputWorkspace=sq_ws_name,
                                   OutputWorkspace=sq_ws_name,
                                   EMode='Elastic',
                                   Target='MomentumTransfer')
            simpleapi.ConvertToPointData(
                InputWorkspace=sq_ws_name,
                OutputWorkspace=sq_ws_name)  # TODO REMOVE THIS LINE
        elif ext in ('DAT', 'TXT'):
            # BUGFIX: ext is upper-cased above, so the previous comparison
            # against the lower-case literal 'txt' could never match .txt files
            try:
                simpleapi.LoadAscii(Filename=file_name,
                                    OutputWorkspace=sq_ws_name,
                                    Unit='MomentumTransfer')
            except RuntimeError:
                return "InvalidInput", 0, 0
            # the .dat/.txt file actually stores S(Q)-1, so shift it up to S(Q)
            out_ws = AnalysisDataService.retrieve(sq_ws_name)
            out_ws += 1

        # explicit check instead of a bare assert: asserts are stripped under -O
        if not AnalysisDataService.doesExist(sq_ws_name):
            raise RuntimeError('Unable to load S(Q) file %s.' % file_name)

        # remember the current S(Q) workspace
        self._currSqWsName = sq_ws_name
        self._sqIndexDict[self._currSqWsName] = 0

        # the Q range is taken from the first spectrum's X axis
        sq_ws = AnalysisDataService.retrieve(sq_ws_name)
        q_min = sq_ws.readX(0)[0]
        q_max = sq_ws.readX(0)[-1]

        return sq_ws_name, q_min, q_max
Code example #3
0
    def test_nomad_no_mins(self):
        """Crop each bank at its own upper limit only (no lower limits)."""
        api.LoadNexusProcessed(Filename='NOM_91796_banks.nxs',
                               OutputWorkspace='NOM_91796_banks')
        # inf/nan upper limits mean "leave that bank's upper edge alone"
        alg_test = run_algorithm(
            'CropWorkspaceRagged',
            InputWorkspace='NOM_91796_banks',
            OutputWorkspace='NOM_91796_banks',
            XMax=[10.20, 20.8, np_inf, math_nan, np_nan, 9.35])

        self.assertTrue(alg_test.isExecuted())

        # each bank keeps more points than in test_nomad_inplace because no
        # lower limits were applied
        cropped = AnalysisDataService.retrieve('NOM_91796_banks')
        expected_lengths = [511, 1041, 2001, 2001, 2001, 468]
        for index, expected in enumerate(expected_lengths):
            self.assertEqual(len(cropped.readX(index)), expected)

        AnalysisDataService.remove('NOM_91796_banks')
Code example #4
0
    def test_nomad_inplace(self):
        """Crop all six banks in place with per-bank lower and upper limits."""
        api.LoadNexusProcessed(Filename='NOM_91796_banks.nxs',
                               OutputWorkspace='NOM_91796_banks')
        # nan upper limits mean "leave that bank's upper edge alone"
        alg_test = run_algorithm(
            'CropWorkspaceRagged',
            InputWorkspace='NOM_91796_banks',
            OutputWorkspace='NOM_91796_banks',
            XMin=[0.67, 1.20, 2.42, 3.70, 4.12, 0.39],
            XMax=[10.20, 20.8, np_nan, math_nan, np_nan, 9.35])

        self.assertTrue(alg_test.isExecuted())

        # check the cropped X-axis length of every bank
        cropped = AnalysisDataService.retrieve('NOM_91796_banks')
        for index, expected in enumerate([477, 981, 1880, 1816, 1795, 448]):
            self.assertEqual(len(cropped.readX(index)), expected)

        AnalysisDataService.remove('NOM_91796_banks')
Code example #5
0
    def test_nomad_no_mins(self):
        """Rebin with doubled bin width and per-bank upper limits only."""
        api.LoadNexusProcessed(Filename="NOM_91796_banks.nxs",
                               OutputWorkspace="NOM_91796_banks")
        # inf/nan upper limits mean "keep that bank's original upper edge"
        alg_test = run_algorithm(
            "RebinRagged",
            InputWorkspace="NOM_91796_banks",
            OutputWorkspace="NOM_91796_banks",
            Delta=0.04,  # double original data bin size
            XMax=[10.20, 20.8, np_inf, math_nan, np_nan, 9.35])

        self.assertTrue(alg_test.isExecuted())

        # each bank keeps more bins than in test_nomad_inplace because no
        # lower limits were applied
        rebinned = AnalysisDataService.retrieve("NOM_91796_banks")
        expected_lengths = [256, 521, 1001, 1001, 1001, 235]
        for index, expected in enumerate(expected_lengths):
            self.assertEqual(len(rebinned.readX(index)), expected)

        AnalysisDataService.remove("NOM_91796_banks")
Code example #6
0
    def test_nomad_inplace(self):
        """Rebin in place at the original bin width with per-bank limits."""
        api.LoadNexusProcessed(Filename="NOM_91796_banks.nxs",
                               OutputWorkspace="NOM_91796_banks")
        # nan upper limits mean "keep that bank's original upper edge"
        alg_test = run_algorithm(
            "RebinRagged",
            InputWorkspace="NOM_91796_banks",
            OutputWorkspace="NOM_91796_banks",
            XMin=[0.67, 1.20, 2.42, 3.70, 4.12, 0.39],
            Delta=0.02,  # original data bin size
            XMax=[10.20, 20.8, np_nan, math_nan, np_nan, 9.35])

        self.assertTrue(alg_test.isExecuted())

        # check the rebinned X-axis length of every bank
        rebinned = AnalysisDataService.retrieve("NOM_91796_banks")
        for index, expected in enumerate([478, 981, 1880, 1816, 1795, 449]):
            self.assertEqual(len(rebinned.readX(index)), expected)

        AnalysisDataService.remove("NOM_91796_banks")
Code example #7
0
 def apply_vanadium_corrections(self, cyclevana, i, focused_ws):
     """Divide a focused workspace by its matching vanadium run, in place.

     Loads the vanadium data selected by ``self.get_vanadium(i, cyclevana)``,
     rebins it to match ``focused_ws``, divides the focused data by it, then
     converts the result back to TOF and zeroes any NaN/infinity values
     produced by the division.

     Parameters
     ----------
     cyclevana : vanadium cycle identifier, passed to ``self.get_vanadium``
     i : focus index, passed to ``self.get_vanadium``
     focused_ws : name of the focused workspace; overwritten with the result
     """
     # fetch the matching vanadium data into a temporary "vana" workspace
     simple.LoadNexusProcessed(Filename=self.get_vanadium(i, cyclevana),
                               OutputWorkspace="vana")
     # binning must match before the two workspaces can be divided
     simple.RebinToWorkspace(WorkspaceToRebin="vana",
                             WorkspaceToMatch=focused_ws,
                             OutputWorkspace="vana")
     # normalise: focused / vanadium, written back over focused_ws
     simple.Divide(LHSWorkspace=focused_ws,
                   RHSWorkspace="vana",
                   OutputWorkspace=focused_ws)
     # the temporary vanadium workspace is no longer needed
     simple.DeleteWorkspace("vana")
     simple.ConvertUnits(InputWorkspace=focused_ws,
                         OutputWorkspace=focused_ws,
                         Target="TOF",
                         EMode="Elastic")
     # division by empty vanadium bins yields NaN/inf; zero them out
     simple.ReplaceSpecialValues(InputWorkspace=focused_ws,
                                 OutputWorkspace=focused_ws,
                                 NaNValue=0.0,
                                 NaNError=0.0,
                                 InfinityValue=0.0,
                                 InfinityError=0.0)
Code example #8
0
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# import mantid algorithms, numpy and matplotlib
import time
import mantid.simpleapi as ms
from mantid import plots
import matplotlib.pyplot as plt

import numpy as np

import sys
sys.path.append('../Calibration')
from Calibration_plots import plot_gr_nr
#f1.show()
#if __name__=="__main__":
ms.LoadNexusProcessed('diamond_gr.nxs', OutputWorkspace='gr')
ms.LoadNexusProcessed('diamond_nr.nxs', OutputWorkspace='nr')
f1 = plot_gr_nr('gr', 'nr', expected_n=[4, 16, 28])
f1.show()
time.sleep(5)
Code example #9
0
File: test_Deltad_d.py  Project: quantumsteve/addie
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# import mantid algorithms, numpy and matplotlib
sys.path.append('../Calibration')
import mantid.simpleapi as ms
import time
import matplotlib.pyplot as plt
from Calibration_plots import plot_delta_d_ttheta
import numpy as np

#ms.LoadNexusProcessed(Filename='NOM_group.nxs', OutputWorkspace='NOM_group')
ms.LoadDetectorsGroupingFile(InputFile='Nom_group_detectors.xml',
                             OutputWorkspace='NOM_group')
ms.LoadNexusProcessed(Filename='NOM_resolution.nxs', OutputWorkspace='NOM_res')

#test case with grouping workspace
f1 = plot_delta_d_ttheta('Nom_res', groupwkspc='Nom_group')
f1.show()

#test case without grouping workspace
f2 = plot_delta_d_ttheta('Nom_res')
f2.show()

time.sleep(5)
Code example #10
0
File: test_plot_utils.py  Project: Kvieta1990/addie
def diamond_gr():
    """Load the diamond G(r) reference data and return the workspace."""
    gr_path = os.path.join(DATA_DIR, 'diamond_gr.nxs')
    ms.LoadNexusProcessed(Filename=gr_path, OutputWorkspace='gr')
    return ms.mtd['gr']
Code example #11
0
File: test_plot_utils.py  Project: Kvieta1990/addie
def res_wksp():
    """Load the NOMAD resolution reference data and return the workspace."""
    resolution_path = os.path.join(DATA_DIR, 'NOM_resolution.nxs')
    ms.LoadNexusProcessed(Filename=resolution_path, OutputWorkspace='NOM_res')
    return ms.mtd['NOM_res']
Code example #12
0
File: WishCalibrate.py  Project: PeterParker/mantid
    def runTest(self):
        """Calibrate WISH tube positions from known neutron-absorbing bands.

        Runs a first calibration pass over every tube, identifies tubes whose
        fitted peak centers deviate too far from the per-peak mean, refits
        those tubes, and saves both calibration tables.  The first-pass table
        is kept in ``self.calibration_table`` and the corrected table in
        ``self.correction_table``.
        """
        # This script calibrates WISH using known peak positions from
        # neutron absorbing bands. The workspace with suffix "_calib"
        # contains calibrated data. The workspace with suffix "_corrected"
        # contains calibrated data with known problematic tubes also corrected

        ws = mantid.LoadNexusProcessed(Filename="WISH30541_integrated.nxs")

        # This array defines the positions of peaks on the detector in
        # meters from the center (0)

        # For wish this is calculated as follows:
        # Height of all 7 bands = 0.26m => each band is separated by 0.260 / 6 = 0.0433m
        # (NOTE(review): the original comment said 0.4333m, which matches
        # neither 0.260 / 6 nor the 0.206/0.413 positions derived below)

        # The bands are on a cylinder diameter 0.923m. So we can work out the angle as
        # (0.0433 * n) / (0.923 / 2) where n is the number of bands above (or below) the
        # center band.

        # Putting this together with the distance to the detector tubes (2.2m) we get
        # the following:  (0.0433n) / 0.4615 * 2200 = Expected peak positions
        # From this we can show there should be 5 peaks (peaks 6 + 7 are too high/low)
        # at: 0, 0.206, 0.413 respectively (this is symmetrical so +/-)

        peak_positions = np.array([-0.413, -0.206, 0, 0.206, 0.413])
        funcForm = 5 * [1]  # 5 gaussian peaks
        # rough pixel positions of the five peaks, used to seed the fits
        fitPar = TubeCalibFitParams([59, 161, 258, 353, 448])
        fitPar.setAutomatic(True)

        instrument = ws.getInstrument()
        spec = TubeSpec(ws)

        # calibrate every tube of the whole instrument
        spec.setTubeSpecByString(instrument.getFullName())

        idealTube = IdealTube()
        idealTube.setArray(peak_positions)

        # First calibrate all of the detectors
        calibrationTable, peaks = tube.calibrate(ws, spec, peak_positions, funcForm, margin=15,
                                                 outputPeak=True, fitPar=fitPar)
        self.calibration_table = calibrationTable

        def findBadPeakFits(peaksTable, threshold=10):
            """ Find peaks whose fit values fall outside of a given tolerance
            of the mean peak centers across all tubes.

            Tubes are defined as having a bad fit if the absolute difference
            between the fitted peak centers for a specific tube and the
            mean of the fitted peak centers for all tubes differ more than
            the threshold parameter.

            @param peaksTable: the table containing fitted peak centers
            @param threshold: the tolerance on the difference from the mean value
            @return A list of expected peak positions and a list of indices of tubes
            to correct
            """
            n = len(peaksTable)
            # the first column is the tube name; the rest are peak centers
            num_peaks = peaksTable.columnCount() - 1
            column_names = ['Peak%d' % i for i in range(1, num_peaks + 1)]
            data = np.zeros((n, num_peaks))
            for i, row in enumerate(peaksTable):
                data_row = [row[name] for name in column_names]
                data[i, :] = data_row

            # data now has all the peaks positions for each tube
            # the mean value is the expected value for the peak position for each tube
            expected_peak_pos = np.mean(data, axis=0)

            # calculate how far from the expected position each peak position is
            distance_from_expected = np.abs(data - expected_peak_pos)
            check = np.where(distance_from_expected > threshold)[0]
            problematic_tubes = list(set(check))
            print("Problematic tubes are: " + str(problematic_tubes))
            return expected_peak_pos, problematic_tubes

        def correctMisalignedTubes(ws, calibrationTable, peaksTable, spec, idealTube, fitPar, threshold=10):
            """ Correct misaligned tubes due to poor fitting results
            during the first round of calibration.

            Misaligned tubes are first identified according to a tolerance
            applied to the absolute difference between the fitted tube
            positions and the mean across all tubes.

            The FindPeaks algorithm is then used to find a better fit
            with the ideal tube positions as starting parameters
            for the peak centers.

            From the refitted peaks the positions of the detectors in the
            tube are recalculated.

            @param ws: the workspace to get the tube geometry from
            @param calibrationTable: the calibration table output from running calibration
            @param peaksTable: the table containing the fitted peak centers from calibration
            @param spec: the tube spec for the instrument
            @param idealTube: the ideal tube for the instrument
            @param fitPar: the fitting parameters for calibration
            @param threshold: tolerance defining whether a peak is outside of the acceptable range
            @return table of corrected detector positions
            """
            table_name = calibrationTable.name() + 'Corrected'
            corrections_table = mantid.CreateEmptyTableWorkspace(OutputWorkspace=table_name)
            corrections_table.addColumn('int', "Detector ID")
            corrections_table.addColumn('V3D', "Detector Position")

            mean_peaks, bad_tubes = findBadPeakFits(peaksTable, threshold)

            for index in bad_tubes:
                print("Refitting tube %s" % spec.getTubeName(index))
                tube_dets, _ = spec.getTube(index)
                # re-extract the tube's data into the 'TubePlot' workspace
                getPoints(ws, idealTube.getFunctionalForms(), fitPar, tube_dets)
                tube_ws = mantid.mtd['TubePlot']
                fit_ws = mantid.FindPeaks(InputWorkspace=tube_ws, WorkspaceIndex=0,
                                          PeakPositions=fitPar.getPeaks(), PeaksList='RefittedPeaks')
                centers = [row['centre'] for row in fit_ws]
                detIDList, detPosList = getCalibratedPixelPositions(ws, centers, idealTube.getArray(), tube_dets)

                # NOTE(review): 'id' shadows the builtin; rename on next edit
                for id, pos in zip(detIDList, detPosList):
                    corrections_table.addRow({'Detector ID': id, 'Detector Position': kernel.V3D(*pos)})

            return corrections_table

        corrected_calibration_table = correctMisalignedTubes(ws, calibrationTable, peaks, spec, idealTube, fitPar)
        self.correction_table = corrected_calibration_table
        # NOTE(review): the out_path keywords below look swapped — the
        # correction table is saved to calibration_out_path and the
        # calibration table to correction_out_path; confirm the intent
        tube.saveCalibration(self.correction_table.getName(), out_path=self.calibration_out_path)
        tube.saveCalibration(self.calibration_table.getName(), out_path=self.correction_out_path)