Example #1
    def validateWorkspaces(self, valNames=None, mismatchName=None):
        """
        Performs a check that two workspaces are equal using the CheckWorkspacesMatch
        algorithm. Loads one workspace from a nexus file if appropriate.
        Returns true if: the workspaces match 
                      OR the validate method has not been overridden.
        Returns false if the workspaces do not match. The reason will be in the log.
        """
        if valNames is None:
            valNames = self.validate()

        from mantid.simpleapi import SaveNexus, AlgorithmManager

        checker = AlgorithmManager.create("CheckWorkspacesMatch")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1", valNames[0])
        checker.setPropertyValue("Workspace2", valNames[1])
        checker.setPropertyValue("Tolerance", str(self.tolerance))
        if hasattr(self, "tolerance_is_reller") and self.tolerance_is_reller:
            checker.setPropertyValue("ToleranceRelerr", "1")
        for d in self.disableChecking:
            checker.setPropertyValue("Check" + d, "0")
        checker.execute()
        if checker.getPropertyValue("Result") != "Success!":
            print(self.__class__.__name__)
            if mismatchName:
                SaveNexus(InputWorkspace=valNames[0], Filename=self.__class__.__name__ + mismatchName + "-mismatch.nxs")
            else:
                SaveNexus(InputWorkspace=valNames[0], Filename=self.__class__.__name__ + "-mismatch.nxs")
            return False

        return True
Example #2
    def validateWorkspaces(self, valNames=None, mismatchName=None):
        '''
        Performs a check that two workspaces are equal using the CompareWorkspaces
        algorithm. Loads one workspace from a nexus file if appropriate.
        Returns true if: the workspaces match
                      OR the validate method has not been overridden.
        Returns false if the workspaces do not match. The reason will be in the log.
        '''
        if valNames is None:
            valNames = self.validate()

        from mantid.simpleapi import SaveNexus, AlgorithmManager

        checker = AlgorithmManager.create("CompareWorkspaces")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1", valNames[0])
        checker.setPropertyValue("Workspace2", valNames[1])
        checker.setProperty("Tolerance", float(self.tolerance))
        if hasattr(self, 'tolerance_is_reller') and self.tolerance_is_reller:
            checker.setProperty("ToleranceRelerr", True)
        for d in self.disableChecking:
            checker.setProperty("Check"+d, False)
        checker.execute()
        if not checker.getProperty("Result").value:
            print(self.__class__.__name__)
            if mismatchName:
                SaveNexus(InputWorkspace=valNames[0],
                          Filename=self.__class__.__name__+mismatchName+'-mismatch.nxs')
            else:
                SaveNexus(InputWorkspace=valNames[0],
                          Filename=self.__class__.__name__+'-mismatch.nxs')
            return False

        return True
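Examples #1 and #2 are the same validator written first against the deprecated CheckWorkspacesMatch algorithm and then against its replacement, CompareWorkspaces: the former is driven through string-valued properties and reports the string "Success!", while the latter takes typed properties and exposes a boolean Result. The sketch below is not taken from either example; it shows the bare CompareWorkspaces call pattern on two throw-away sample workspaces, with the workspace names and tolerance chosen purely for illustration.

from mantid.api import AlgorithmManager
from mantid.simpleapi import CreateSampleWorkspace

# Two identical sample workspaces, used only to exercise the comparison.
CreateSampleWorkspace(OutputWorkspace="ws_a")
CreateSampleWorkspace(OutputWorkspace="ws_b")

checker = AlgorithmManager.create("CompareWorkspaces")
checker.setLogging(True)
checker.setPropertyValue("Workspace1", "ws_a")
checker.setPropertyValue("Workspace2", "ws_b")
checker.setProperty("Tolerance", 1e-7)
checker.execute()

# Result is a boolean, unlike the "Success!" string returned by CheckWorkspacesMatch.
print(checker.getProperty("Result").value)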
Example #3
    def __init__(self, acquisitionMode):
        """
        Create the export model by providing an acquisition mode.

        Args:
            acquisitionMode (str): acquisition mode
        """
        self._exportAlgorithms = dict(
            RundexSettings.EXPORT_ALGORITHMS[acquisitionMode])
        self._exportExtensions = dict()
        self._exportDocs = dict()
        for a in self._exportAlgorithms.keys():
            if a in RundexSettings.EXPORT_ALGO_EXTENSION:
                self._exportExtensions[a] = \
                    RundexSettings.EXPORT_ALGO_EXTENSION[a]
            try:
                alg = AlgorithmManager.createUnmanaged(a)
                self._exportDocs[a] = alg.summary()
            except Exception:
                # the algorithm may not be registered in this Mantid build; skip its documentation
                pass
        self._pool = DrillAlgorithmPool()
        self._pool.signals.taskError.connect(self._onTaskError)
        self._pool.signals.taskSuccess.connect(self._onTaskSuccess)
        self._exports = dict()
        self._successExports = dict()
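The loop above fills the per-algorithm documentation from summary(). A minimal sketch of that lookup on its own, assuming a working Mantid installation; "SaveAscii" is just an arbitrary registered algorithm chosen for illustration, and the initialize() call is added defensively since unmanaged algorithms are not initialised when created.

from mantid.api import AlgorithmManager

alg = AlgorithmManager.createUnmanaged("SaveAscii")
alg.initialize()       # defensive: createUnmanaged does not initialise the algorithm
print(alg.summary())   # the one-line description stored in _exportDocs above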
Example #4
 def test_sort_x_works_child(self):
     # Create unsorted workspace
     parent = AlgorithmManager.createUnmanaged('Load')
     create_ws_alg = parent.createChildAlgorithm("CreateWorkspace")
     dataX = [4., 3., 2., 1.]
     dataY = [1., 2., 3.]
     dataE = [1., 2., 3.]
     create_ws_alg.setProperty("DataX", dataX)
     create_ws_alg.setProperty("DataY", dataY)
     create_ws_alg.setProperty("DataE", dataE)
     create_ws_alg.setProperty("UnitX",'TOF')
     create_ws_alg.setProperty("Distribution", False)
     create_ws_alg.execute()
     # Run the algorithm
     sort_alg = parent.createChildAlgorithm("SortXAxis")
     sort_alg.setProperty("InputWorkspace", create_ws_alg.getProperty("OutputWorkspace").value)
     sort_alg.execute()
     # Check the resulting data values. The sort should have reversed X into ascending order and reordered Y and E to match
     sortedws = sort_alg.getProperty("OutputWorkspace").value
     sortedX = sortedws.readX(0)
     sortedY = sortedws.readY(0)
     sortedE = sortedws.readE(0)
     self.assertEqual(sorted(dataX), sortedX.tolist())
     dataY.reverse()
     dataE.reverse()
     self.assertEqual(dataY, sortedY.tolist())
     self.assertEqual(dataE, sortedE.tolist())
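The same behaviour can be exercised without child algorithms by going through mantid.simpleapi, which is usually the simpler route outside an algorithm's own exec(). The sketch below is not part of the test above; it mirrors its data to show that SortXAxis sorts X into ascending order and reorders Y and E to match.

from mantid.simpleapi import CreateWorkspace, SortXAxis

ws = CreateWorkspace(DataX=[4., 3., 2., 1.], DataY=[1., 2., 3.], DataE=[1., 2., 3.],
                     UnitX="TOF", Distribution=False)
sorted_ws = SortXAxis(InputWorkspace=ws)

print(sorted_ws.readX(0))  # [1. 2. 3. 4.] - ascending X
print(sorted_ws.readY(0))  # [3. 2. 1.] - Y reordered to follow the sorted X
print(sorted_ws.readE(0))  # [3. 2. 1.]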
Example #5
 def test_sort_x_works_child(self):
     # Create unsorted workspace
     parent = AlgorithmManager.createUnmanaged('Load')
     create_ws_alg = parent.createChildAlgorithm("CreateWorkspace")
     dataX = [4., 3., 2., 1.]
     dataY = [1., 2., 3.]
     dataE = [1., 2., 3.]
     create_ws_alg.setProperty("DataX", dataX)
     create_ws_alg.setProperty("DataY", dataY)
     create_ws_alg.setProperty("DataE", dataE)
     create_ws_alg.setProperty("UnitX", 'TOF')
     create_ws_alg.setProperty("Distribution", False)
     create_ws_alg.execute()
     # Run the algorithm
     sort_alg = parent.createChildAlgorithm("SortXAxis")
     sort_alg.setProperty(
         "InputWorkspace",
         create_ws_alg.getProperty("OutputWorkspace").value)
     sort_alg.execute()
     # Check the resulting data values. The sort should have reversed X into ascending order and reordered Y and E to match
     sortedws = sort_alg.getProperty("OutputWorkspace").value
     sortedX = sortedws.readX(0)
     sortedY = sortedws.readY(0)
     sortedE = sortedws.readE(0)
     self.assertEqual(sorted(dataX), sortedX.tolist())
     dataY.reverse()
     dataE.reverse()
     self.assertEqual(dataY, sortedY.tolist())
     self.assertEqual(dataE, sortedE.tolist())
 def test_incorrect_run_number(self):
     alg = AlgorithmManager.create('LoadElementalAnalysisData')
     alg.setChild(True)
     alg.initialize()
     alg.setProperty('Run', 1)
     alg.setProperty('GroupWorkspace', '1')
     errors = alg.validateInputs()
     self.assertTrue("Run" in errors)
     self.assertEqual(len(errors), 1)
     self.assertFalse(AnalysisDataService.doesExist("1"))
Example #7
def run_reduction(input_workspace: EventWorkspace, workspace_name: str,
                  settings_file: str, output_dir: str):
    """Run the reduction for one workspace, saving the reduced data as Nexus
    files and a copy of the settings file to the output directory."""
    # Get the angle
    angle = get_angle(input_workspace)
    params = find_angle_parameters_from_settings_json(settings_file, angle)

    alg = AlgorithmManager.create("ReflectometryISISLoadAndProcess")
    properties = {
        "InputRunList": workspace_name,
        "FirstTransmissionRunList": params.first_transmission_run_list,
        "SecondTransmissionRunList": params.second_transmission_run_list,
        "ThetaIn": angle,
        "DetectorCorrectionType": params.detector_correction_type,
        "MonitorBackgroundWavelengthMin":
        params.monitor_background_wavelength_min,
        "MonitorBackgroundWavelengthMax":
        params.monitor_background_wavelength_max,
        "MonitorIntegrationWavelengthMin":
        params.monitor_integration_wavelength_min,
        "MonitorIntegrationWavelengthMax":
        params.monitor_integration_wavelength_max,
        "WavelengthMin": params.wavelength_min,
        "WavelengthMax": params.wavelength_max,
        "I0MonitorIndex": params.i_zero_monitor_index,
        "AnalysisMode": params.analysis_mode,
        "StartOverlap": params.start_overlap,
        "EndOverlap": params.end_overlap,
        "ScaleRHSWorkspace": params.scale_rhs_workspace,
        "TransmissionProcessingInstructions":
        params.transmission_processing_instructions,
        "ProcessingInstructions": params.processing_instructions
    }
    alg.setProperties(properties)
    alg.execute()

    # Save reduced data as Nexus files
    OutputWorkspace = alg.getPropertyValue("OutputWorkspace")
    OutputWorkspaceBinned = alg.getPropertyValue("OutputWorkspaceBinned")

    SaveNexus(OutputWorkspace,
              os.path.join(output_dir, OutputWorkspace + ".nxs"))
    SaveNexus(OutputWorkspaceBinned,
              os.path.join(output_dir, OutputWorkspaceBinned + ".nxs"))

    # Save a copy of the .json settings file
    copy(settings_file, output_dir)

    return OutputWorkspaceBinned
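Example #7 collects every reduction parameter into a plain dictionary and applies it in one call with setProperties(). A stripped-down sketch of the same pattern on a simpler algorithm, with a placeholder input workspace and binning parameters chosen only for illustration:

from mantid.api import AlgorithmManager
from mantid.simpleapi import CreateSampleWorkspace

CreateSampleWorkspace(OutputWorkspace="raw")   # placeholder input data

alg = AlgorithmManager.create("Rebin")
alg.setProperties({
    "InputWorkspace": "raw",
    "OutputWorkspace": "rebinned",
    "Params": "0,100,20000",                   # illustrative binning only
})
alg.execute()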
Example #8
    def checkWorkspacesMatch(self, ws1, ws2):
        from mantid.simpleapi import SaveNexus, AlgorithmManager
        checker = AlgorithmManager.create("CheckWorkspacesMatch")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1", ws1)
        checker.setPropertyValue("Workspace2", ws2)
        checker.setPropertyValue("Tolerance", str(self.tolerance))
        checker.setPropertyValue("CheckInstrument","0")

        checker.execute()

        if checker.getPropertyValue("Result") != 'Success!':
            print(self.__class__.__name__)
            SaveNexus(InputWorkspace=ws2, Filename=self.__class__.__name__ + '-mismatch.nxs')
            return False

        return True
    def validate(self):
        results = 'SCDR_output'
        reference = 'SingleCrystalDiffuseReduction.nxs'

        Load(Filename=reference,OutputWorkspace=reference)

        checker = AlgorithmManager.create("CompareMDWorkspaces")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1",results)
        checker.setPropertyValue("Workspace2",reference)
        checker.setPropertyValue("Tolerance", "1e-7")

        checker.execute()
        if checker.getPropertyValue("Equals") != "1":
            print(" Workspaces do not match, result: ",checker.getPropertyValue("Result"))
            print(self.__class__.__name__)
            SaveMD(InputWorkspace=results,Filename=self.__class__.__name__+'-mismatch.nxs')
            return False

        return True
Example #10
    def validate(self):
        results = 'SCDR_output'
        reference = 'SingleCrystalDiffuseReduction.nxs'

        Load(Filename=reference,OutputWorkspace=reference)

        checker = AlgorithmManager.create("CompareMDWorkspaces")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1",results)
        checker.setPropertyValue("Workspace2",reference)
        checker.setPropertyValue("Tolerance", "1e-7")

        checker.execute()
        if checker.getPropertyValue("Equals") != "1":
            print(" Workspaces do not match, result: ",checker.getPropertyValue("Result"))
            print(self.__class__.__name__)
            SaveMD(InputWorkspace=results,Filename=self.__class__.__name__+'-mismatch.nxs')
            return False

        return True
    def validate(self):
        results = 'CMRSCMD_output_HKL'
        reference = 'ConvertMultipleRunsToSingleCrystalMD_HKL.nxs'

        Load(Filename=reference,OutputWorkspace=reference)

        checker = AlgorithmManager.create("CompareMDWorkspaces")
        checker.setLogging(True)
        checker.setPropertyValue("Workspace1",results)
        checker.setPropertyValue("Workspace2",reference)
        checker.setPropertyValue("Tolerance", "1e-5")
        checker.setPropertyValue("IgnoreBoxID", "1")

        checker.execute()
        if checker.getPropertyValue("Equals") != "1":
            print(" Workspaces do not match, result: ",checker.getPropertyValue("Result"))
            print(self.__class__.__name__)
            SaveMD(InputWorkspace=results,Filename=self.__class__.__name__+'-mismatch.nxs')
            return False

        return True
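The CompareMDWorkspaces checks in Examples #8 and #10 read the Equals output through getPropertyValue(), which returns the string "1" or "0"; the same flag is available as a boolean via getProperty("Equals").value, mirroring how Example #2 reads CompareWorkspaces' Result. A minimal sketch, with two throw-away MD histogram workspaces standing in for the result and reference:

from mantid.api import AlgorithmManager
from mantid.simpleapi import CreateMDHistoWorkspace

# Two identical 1D MD histogram workspaces, used only to exercise the comparison.
for name in ("md_a", "md_b"):
    CreateMDHistoWorkspace(SignalInput=[1, 2, 3], ErrorInput=[1, 1, 1],
                           Dimensionality=1, Extents="0,3", NumberOfBins="3",
                           Names="x", Units="a.u.", OutputWorkspace=name)

checker = AlgorithmManager.create("CompareMDWorkspaces")
checker.setLogging(True)
checker.setPropertyValue("Workspace1", "md_a")
checker.setPropertyValue("Workspace2", "md_b")
checker.setPropertyValue("Tolerance", "1e-7")
checker.execute()

print(checker.getProperty("Equals").value)   # boolean form of the check
print(checker.getPropertyValue("Result"))    # human-readable reason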
Example #13
def fit_tof_iteration(sample_data, container_data, runs, flags):
    """
    Performs a single iteration of the time-of-flight corrections and fitting
    workflow.

    :param sample_data: Loaded sample data workspaces
    :param container_data: Loaded container data workspaces
    :param runs: A string specifying the runs to process
    :param flags: A dictionary of flags to control the processing
    :return: Tuple of (workspace group name, pre correction fit parameters,
             final fit parameters, chi^2 values)
    """
    # Transform inputs into something the algorithm can understand
    if isinstance(flags['masses'][0], list):
        mass_values = _create_profile_strs_and_mass_list(copy.deepcopy(flags['masses'][0]))[0]
        profiles_strs = []
        for mass_spec in flags['masses']:
            profiles_strs.append(_create_profile_strs_and_mass_list(mass_spec)[1])
    else:
        mass_values, profiles_strs = _create_profile_strs_and_mass_list(flags['masses'])
    background_str = _create_background_str(flags.get('background', None))
    intensity_constraints = _create_intensity_constraint_str(flags['intensity_constraints'])

    # The simpleapi function won't have been created so do it by hand
    VesuvioTOFFit = _create_algorithm_function("VesuvioTOFFit", 1,
                                               AlgorithmManager.createUnmanaged("VesuvioTOFFit"))
    VesuvioCorrections = _create_algorithm_function("VesuvioCorrections", 1,
                                                    AlgorithmManager.createUnmanaged("VesuvioCorrections"))

    num_spec = sample_data.getNumberHistograms()
    pre_correct_pars_workspace = None
    pars_workspace = None
    max_fit_iterations = flags.get('max_fit_iterations', 5000)

    output_groups = []
    chi2_values = []
    for index in range(num_spec):
        if isinstance(profiles_strs, list):
            profiles = profiles_strs[index]
        else:
            profiles = profiles_strs

        suffix = _create_fit_workspace_suffix(index,
                                              sample_data,
                                              flags['fit_mode'],
                                              flags['spectra'],
                                              flags.get('iteration', None))

        # Corrections
        corrections_args = dict()

        # Need to do a fit first to obtain the parameter table
        pre_correction_pars_name = runs + "_params_pre_correction" + suffix
        corrections_fit_name = "__vesuvio_corrections_fit"
        VesuvioTOFFit(InputWorkspace=sample_data,
                      WorkspaceIndex=index,
                      Masses=mass_values,
                      MassProfiles=profiles,
                      Background=background_str,
                      IntensityConstraints=intensity_constraints,
                      OutputWorkspace=corrections_fit_name,
                      FitParameters=pre_correction_pars_name,
                      MaxIterations=max_fit_iterations,
                      Minimizer=flags['fit_minimizer'])
        DeleteWorkspace(corrections_fit_name)
        corrections_args['FitParameters'] = pre_correction_pars_name

        # Add the multiple scattering arguments
        corrections_args.update(flags['ms_flags'])

        corrected_data_name = runs + "_tof_corrected" + suffix
        linear_correction_fit_params_name = runs + "_correction_fit_scale" + suffix

        if flags.get('output_verbose_corrections', False):
            corrections_args["CorrectionWorkspaces"] = runs + "_correction" + suffix
            corrections_args["CorrectedWorkspaces"] = runs + "_corrected" + suffix

        if container_data is not None:
            corrections_args["ContainerWorkspace"] = container_data

        VesuvioCorrections(InputWorkspace=sample_data,
                           OutputWorkspace=corrected_data_name,
                           LinearFitResult=linear_correction_fit_params_name,
                           WorkspaceIndex=index,
                           GammaBackground=flags.get('gamma_correct', False),
                           Masses=mass_values,
                           MassProfiles=profiles,
                           IntensityConstraints=intensity_constraints,
                           MultipleScattering=True,
                           GammaBackgroundScale=flags.get('fixed_gamma_scaling', 0.0),
                           ContainerScale=flags.get('fixed_container_scaling', 0.0),
                           **corrections_args)

        # Final fit
        fit_ws_name = runs + "_data" + suffix
        pars_name = runs + "_params" + suffix
        fit_result = VesuvioTOFFit(InputWorkspace=corrected_data_name,
                                   WorkspaceIndex=0, # Corrected data always has a single histogram
                                   Masses=mass_values,
                                   MassProfiles=profiles,
                                   Background=background_str,
                                   IntensityConstraints=intensity_constraints,
                                   OutputWorkspace=fit_ws_name,
                                   FitParameters=pars_name,
                                   MaxIterations=max_fit_iterations,
                                   Minimizer=flags['fit_minimizer'])
        chi2_values.append(fit_result[-1])
        DeleteWorkspace(corrected_data_name)

        # Process parameter tables
        if pre_correct_pars_workspace is None:
            pre_correct_pars_workspace = _create_param_workspace(num_spec, mtd[pre_correction_pars_name])

        if pars_workspace is None:
            pars_workspace = _create_param_workspace(num_spec, mtd[pars_name])

        _update_fit_params(pre_correct_pars_workspace, index, mtd[pre_correction_pars_name], suffix[1:])
        _update_fit_params(pars_workspace, index, mtd[pars_name], suffix[1:])

        DeleteWorkspace(pre_correction_pars_name)
        DeleteWorkspace(pars_name)

        # Process spectrum group
        # Note the ordering of operations here gives the order in the WorkspaceGroup
        group_name = runs + suffix
        output_workspaces = [fit_ws_name, linear_correction_fit_params_name]
        if flags.get('output_verbose_corrections', False):
            output_workspaces += mtd[corrections_args["CorrectionWorkspaces"]].getNames()
            output_workspaces += mtd[corrections_args["CorrectedWorkspaces"]].getNames()
            UnGroupWorkspace(corrections_args["CorrectionWorkspaces"])
            UnGroupWorkspace(corrections_args["CorrectedWorkspaces"])

        output_groups.append(GroupWorkspaces(InputWorkspaces=output_workspaces, OutputWorkspace=group_name))

        # Output the parameter workspaces
        AnalysisDataService.Instance().addOrReplace(runs + "_params_pre_correction" + suffix, pre_correct_pars_workspace)
        AnalysisDataService.Instance().addOrReplace(runs + "_params" + suffix, pars_workspace)

    if len(output_groups) > 1:
        result_ws = output_groups
    else:
        result_ws = output_groups[0]

    return (result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values)
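For reference, these are the keys that fit_tof_iteration() reads from its flags argument, gathered from the code above. Every value shown is a placeholder chosen for illustration, not a validated Vesuvio configuration.

example_flags = {
    "masses": [],                            # mass/profile definitions (list, or list of lists per run)
    "background": None,                      # optional background definition
    "intensity_constraints": [],             # passed to _create_intensity_constraint_str
    "fit_mode": "spectra",                   # placeholder value
    "spectra": "135-198",                    # placeholder spectrum selection
    "iteration": None,
    "max_fit_iterations": 5000,              # default used by the code above
    "fit_minimizer": "Levenberg-Marquardt",
    "ms_flags": {},                          # forwarded verbatim to VesuvioCorrections
    "gamma_correct": False,
    "fixed_gamma_scaling": 0.0,
    "fixed_container_scaling": 0.0,
    "output_verbose_corrections": False,
}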