def test_sort_x_works_child(self):
    """SortXAxis run as a child algorithm sorts X ascending and reorders Y/E to match."""
    # Build a workspace whose X values are in descending order.
    parent = AlgorithmManager.createUnmanaged('Load')
    make_ws = parent.createChildAlgorithm("CreateWorkspace")
    x_in = [4., 3., 2., 1.]
    y_in = [1., 2., 3.]
    e_in = [1., 2., 3.]
    for prop, value in (("DataX", x_in),
                        ("DataY", y_in),
                        ("DataE", e_in),
                        ("UnitX", 'TOF'),
                        ("Distribution", False)):
        make_ws.setProperty(prop, value)
    make_ws.execute()

    # Sort the X axis via a child SortXAxis run.
    sorter = parent.createChildAlgorithm("SortXAxis")
    sorter.setProperty("InputWorkspace",
                       make_ws.getProperty("OutputWorkspace").value)
    sorter.execute()

    # X must come back ascending; Y and E must be reversed alongside it.
    result = sorter.getProperty("OutputWorkspace").value
    self.assertEqual(sorted(x_in), result.readX(0).tolist())
    x_in  # original lists retained; compare against reversed copies below
    self.assertEqual(list(reversed(y_in)), result.readY(0).tolist())
    self.assertEqual(list(reversed(e_in)), result.readE(0).tolist())
def __init__(self, acquisitionMode):
    """
    Create the export model by providing an acquisition mode.

    Args:
        acquisitionMode (str): acquisition mode used to select the set of
            export algorithms from RundexSettings
    """
    # Take a shallow copy so later mutations of this model never touch the
    # shared RundexSettings mapping. (dict(...) replaces a redundant
    # identity dict-comprehension.)
    self._exportAlgorithms = dict(
            RundexSettings.EXPORT_ALGORITHMS[acquisitionMode])
    self._exportExtensions = dict()
    self._exportDocs = dict()
    for a in self._exportAlgorithms:
        if a in RundexSettings.EXPORT_ALGO_EXTENSION:
            self._exportExtensions[a] = \
                RundexSettings.EXPORT_ALGO_EXTENSION[a]
        # Best-effort: an algorithm whose summary cannot be obtained simply
        # gets no doc entry. Narrowed from a bare "except:" which would also
        # have swallowed KeyboardInterrupt/SystemExit.
        try:
            alg = AlgorithmManager.createUnmanaged(a)
            self._exportDocs[a] = alg.summary()
        except Exception:
            pass
    # Pool that runs export algorithms asynchronously; wire its outcome
    # signals to this model's handlers.
    self._pool = DrillAlgorithmPool()
    self._pool.signals.taskError.connect(self._onTaskError)
    self._pool.signals.taskSuccess.connect(self._onTaskSuccess)
    self._exports = dict()
    self._successExports = dict()
def test_sort_x_works_child(self):
    """Verify SortXAxis (as a child algorithm) sorts X and carries Y/E along."""
    # Create an unsorted workspace: X descending, Y/E ascending.
    host = AlgorithmManager.createUnmanaged('Load')
    creator = host.createChildAlgorithm("CreateWorkspace")
    data_x = [4., 3., 2., 1.]
    data_y = [1., 2., 3.]
    data_e = [1., 2., 3.]
    creator.setProperty("DataX", data_x)
    creator.setProperty("DataY", data_y)
    creator.setProperty("DataE", data_e)
    creator.setProperty("UnitX", 'TOF')
    creator.setProperty("Distribution", False)
    creator.execute()
    unsorted_ws = creator.getProperty("OutputWorkspace").value

    # Run SortXAxis on it as a child algorithm.
    sort_alg = host.createChildAlgorithm("SortXAxis")
    sort_alg.setProperty("InputWorkspace", unsorted_ws)
    sort_alg.execute()
    sorted_ws = sort_alg.getProperty("OutputWorkspace").value

    # X should now be ascending, with Y and E reordered in lockstep.
    self.assertEqual(sorted(data_x), sorted_ws.readX(0).tolist())
    expected_y = data_y[::-1]
    expected_e = data_e[::-1]
    self.assertEqual(expected_y, sorted_ws.readY(0).tolist())
    self.assertEqual(expected_e, sorted_ws.readE(0).tolist())
def fit_tof_iteration(sample_data, container_data, runs, flags):
    """
    Performs a single iteration of the time of flight corrections and fitting
    workflow.

    :param sample_data: Loaded sample data workspaces
    :param container_data: Loaded container data workspaces (or None when no
                           container correction is wanted)
    :param runs: A string specifying the runs to process; used as the prefix
                 for every output workspace name
    :param flags: A dictionary of flags to control the processing
    :return: Tuple of (workspace group name, pre correction fit parameters,
             final fit parameters, chi^2 values)
    """
    # Transform inputs into something the algorithm can understand.
    # A list-of-lists 'masses' entry means per-spectrum mass profiles;
    # otherwise one shared profile string is built for all spectra.
    if isinstance(flags['masses'][0], list):
        mass_values = _create_profile_strs_and_mass_list(copy.deepcopy(flags['masses'][0]))[0]
        profiles_strs = []
        for mass_spec in flags['masses']:
            profiles_strs.append(_create_profile_strs_and_mass_list(mass_spec)[1])
    else:
        mass_values, profiles_strs = _create_profile_strs_and_mass_list(flags['masses'])
    background_str = _create_background_str(flags.get('background', None))
    intensity_constraints = _create_intensity_constraint_str(flags['intensity_constraints'])

    # The simpleapi function won't have been created so do it by hand
    VesuvioTOFFit = _create_algorithm_function("VesuvioTOFFit", 1,
                                               AlgorithmManager.createUnmanaged("VesuvioTOFFit"))
    VesuvioCorrections = _create_algorithm_function("VesuvioCorrections", 1,
                                                    AlgorithmManager.createUnmanaged("VesuvioCorrections"))

    num_spec = sample_data.getNumberHistograms()
    # Aggregated parameter tables, lazily created on the first spectrum.
    pre_correct_pars_workspace = None
    pars_workspace = None
    max_fit_iterations = flags.get('max_fit_iterations', 5000)

    output_groups = []
    chi2_values = []
    for index in range(num_spec):
        # Per-spectrum profile when a list was built above, else the shared one.
        if isinstance(profiles_strs, list):
            profiles = profiles_strs[index]
        else:
            profiles = profiles_strs

        suffix = _create_fit_workspace_suffix(index,
                                              sample_data,
                                              flags['fit_mode'],
                                              flags['spectra'],
                                              flags.get('iteration', None))

        # Corrections
        corrections_args = dict()

        # Need to do a fit first to obtain the parameter table
        pre_correction_pars_name = runs + "_params_pre_correction" + suffix
        corrections_fit_name = "__vesuvio_corrections_fit"
        VesuvioTOFFit(InputWorkspace=sample_data,
                      WorkspaceIndex=index,
                      Masses=mass_values,
                      MassProfiles=profiles,
                      Background=background_str,
                      IntensityConstraints=intensity_constraints,
                      OutputWorkspace=corrections_fit_name,
                      FitParameters=pre_correction_pars_name,
                      MaxIterations=max_fit_iterations,
                      Minimizer=flags['fit_minimizer'])
        # Only the parameter table is needed from this throwaway fit.
        DeleteWorkspace(corrections_fit_name)
        corrections_args['FitParameters'] = pre_correction_pars_name

        # Add the multiple scattering arguments
        corrections_args.update(flags['ms_flags'])

        corrected_data_name = runs + "_tof_corrected" + suffix
        linear_correction_fit_params_name = runs + "_correction_fit_scale" + suffix

        if flags.get('output_verbose_corrections', False):
            corrections_args["CorrectionWorkspaces"] = runs + "_correction" + suffix
            corrections_args["CorrectedWorkspaces"] = runs + "_corrected" + suffix

        if container_data is not None:
            corrections_args["ContainerWorkspace"] = container_data

        VesuvioCorrections(InputWorkspace=sample_data,
                           OutputWorkspace=corrected_data_name,
                           LinearFitResult=linear_correction_fit_params_name,
                           WorkspaceIndex=index,
                           GammaBackground=flags.get('gamma_correct', False),
                           Masses=mass_values,
                           MassProfiles=profiles,
                           IntensityConstraints=intensity_constraints,
                           MultipleScattering=True,
                           GammaBackgroundScale=flags.get('fixed_gamma_scaling', 0.0),
                           ContainerScale=flags.get('fixed_container_scaling', 0.0),
                           **corrections_args)

        # Final fit
        fit_ws_name = runs + "_data" + suffix
        pars_name = runs + "_params" + suffix
        fit_result = VesuvioTOFFit(InputWorkspace=corrected_data_name,
                                   WorkspaceIndex=0,  # Corrected data always has a single histogram
                                   Masses=mass_values,
                                   MassProfiles=profiles,
                                   Background=background_str,
                                   IntensityConstraints=intensity_constraints,
                                   OutputWorkspace=fit_ws_name,
                                   FitParameters=pars_name,
                                   MaxIterations=max_fit_iterations,
                                   Minimizer=flags['fit_minimizer'])
        # NOTE(review): the last element of the fit result tuple is taken to
        # be chi^2 — confirm against VesuvioTOFFit's output properties.
        chi2_values.append(fit_result[-1])
        DeleteWorkspace(corrected_data_name)

        # Process parameter tables
        if pre_correct_pars_workspace is None:
            pre_correct_pars_workspace = _create_param_workspace(num_spec,
                                                                 mtd[pre_correction_pars_name])

        if pars_workspace is None:
            pars_workspace = _create_param_workspace(num_spec, mtd[pars_name])

        # suffix[1:] strips the leading separator character for column labels.
        _update_fit_params(pre_correct_pars_workspace,
                           index, mtd[pre_correction_pars_name],
                           suffix[1:])
        _update_fit_params(pars_workspace, index,
                           mtd[pars_name], suffix[1:])

        DeleteWorkspace(pre_correction_pars_name)
        DeleteWorkspace(pars_name)

        # Process spectrum group
        # Note the ordering of operations here gives the order in the WorkspaceGroup
        group_name = runs + suffix
        output_workspaces = [fit_ws_name, linear_correction_fit_params_name]
        if flags.get('output_verbose_corrections', False):
            output_workspaces += mtd[corrections_args["CorrectionWorkspaces"]].getNames()
            output_workspaces += mtd[corrections_args["CorrectedWorkspaces"]].getNames()
            UnGroupWorkspace(corrections_args["CorrectionWorkspaces"])
            UnGroupWorkspace(corrections_args["CorrectedWorkspaces"])

        output_groups.append(GroupWorkspaces(InputWorkspaces=output_workspaces,
                                             OutputWorkspace=group_name))

    # Output the parameter workspaces
    # NOTE(review): 'suffix' here is the value from the final loop iteration,
    # so the aggregated tables are published under the last spectrum's suffix.
    AnalysisDataService.Instance().addOrReplace(runs + "_params_pre_correction" + suffix,
                                                pre_correct_pars_workspace)
    AnalysisDataService.Instance().addOrReplace(runs + "_params" + suffix,
                                                pars_workspace)

    # Single group is returned bare; multiple groups are returned as a list.
    if len(output_groups) > 1:
        result_ws = output_groups
    else:
        result_ws = output_groups[0]

    return (result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values)