def PyExec(self):
    """Convert the input workspace to wavelength and rebin it once per requested wavelength pair."""
    ws = get_input_workspace_as_copy_if_not_same_as_output_workspace(self)
    pairs: List[Tuple[float, float]] = json.loads(self.getProperty(self.WAV_PAIRS).value)
    # One progress report for the unit conversion plus one per wavelength pair.
    progress = Progress(self, start=0.0, end=1.0, nreports=1 + len(pairs))

    # Convert the units into wavelength
    progress.report("Converting workspace to wavelength units.")
    ws = self._convert_units_to_wavelength(ws)

    # Rebin once per wavelength pair and collect the results into a group.
    grouped_output = WorkspaceGroup()
    for wav_range in pairs:
        rebin_string = self._get_rebin_string(ws, *wav_range)
        progress.report(f"Converting wavelength range: {rebin_string}")

        # Perform the rebin
        rebinned = self._perform_rebin(self._get_rebin_params(rebin_string, ws))

        append_to_sans_file_tag(rebinned, "_toWavelength")
        grouped_output.addWorkspace(rebinned)
    self.setProperty("OutputWorkspace", grouped_output)
    def _create_matrix_workspaces_for_parameter_combination(
            self, workspace_group: WorkspaceGroup, x_parameter_name: str,
            y_parameter_name: str) -> None:
        """Creates the matrix workspace for a specific x and y parameter, and adds it to the workspace group."""
        # Guard clauses: identical parameters or unknown parameter names do nothing.
        if x_parameter_name == y_parameter_name:
            return
        if x_parameter_name not in self.x_parameters() or y_parameter_name not in self.y_parameters():
            return

        x_values = self._convert_str_column_values_to_int(
            self.fitting_context.x_parameters[x_parameter_name])
        x_errors = self.fitting_context.x_parameter_errors[x_parameter_name]
        y_values = self._convert_str_column_values_to_int(
            self.fitting_context.y_parameters[y_parameter_name])
        y_errors = self.fitting_context.y_parameter_errors[y_parameter_name]

        # Order all four columns by ascending x value.
        x_values, x_errors, y_values, y_errors = zip(
            *sorted(zip(x_values, x_errors, y_values, y_errors)))

        output_name = self.parameter_combination_workspace_name(x_parameter_name, y_parameter_name)
        if self._parameter_combination_workspace_exists(output_name, x_values, y_values, y_errors):
            return
        self._create_workspace(x_values, x_errors, x_parameter_name,
                               y_values, y_errors, y_parameter_name, output_name)
        workspace_group.add(output_name)
    def test_plot_default_case_with_detector_3_not_present(self):
        """Default plotting should select only 'Detector 1' groups when Detector 3 is absent."""
        group_workspace = WorkspaceGroup()
        # NOTE: '9999; Detector 3' is deliberately absent; 'Detector 4' takes its place.
        for ws_name in ('9999; Detector 1', '9999; Detector 2',
                        '9999; Detector 4', '9998; Detector 1'):
            group_workspace.addWorkspace(CreateSampleWorkspace(OutputWorkspace=ws_name))
        self.context.data_context._loaded_data.add_data(run=[9999, 9998],
                                                        workspace=group_workspace)
        loaded = self.context.data_context._loaded_data
        self.context.group_context.reset_group_to_default(loaded)
        for group_index in range(4):
            self.presenter.add_group_to_view(
                self.context.group_context._groups[group_index], False)
        self.assertEqual(self.view.num_rows(), 4)
        self.assertEqual(len(self.model.groups), 4)
        analyse_checkbox = self.view.get_table_item(0, 4)
        self.assertEqual(analyse_checkbox.checkState(), 0)

        self.presenter.plot_default_case()
        self.assertCountEqual(self.context.group_context.selected_groups,
                              ['9999; Detector 1', '9998; Detector 1'])
    def _create_matrix_workspaces_for_parameter_combinations(
            self, workspace_group: WorkspaceGroup) -> list:
        """Creates a MatrixWorkspace for each parameter combination. These are the workspaces that will be fitted."""
        workspace_names = []
        for x_name in self.x_parameters():
            for y_name in self.y_parameters():
                # A parameter is never combined with itself.
                if x_name == y_name:
                    continue
                x_values = self._convert_str_column_values_to_int(
                    x_name, self.fitting_context.x_parameters)
                x_errors = self.fitting_context.x_parameter_errors[x_name]
                y_values = self._convert_str_column_values_to_int(
                    y_name, self.fitting_context.y_parameters)
                y_errors = self.fitting_context.y_parameter_errors[y_name]

                # Order all four columns by ascending x value.
                x_values, x_errors, y_values, y_errors = zip(
                    *sorted(zip(x_values, x_errors, y_values, y_errors)))

                output_name = self.parameter_combination_workspace_name(x_name, y_name)
                if not self._parameter_combination_workspace_exists(
                        output_name, x_values, y_values, y_errors):
                    self._create_workspace(x_values, x_errors, x_name,
                                           y_values, y_errors,
                                           self._create_y_label(y_name),
                                           output_name)
                    workspace_group.add(output_name)

                # The name is recorded even when the workspace already existed.
                workspace_names.append(output_name)

        return workspace_names
Example #5
0
 def _create_sample_ws_group(ws_name):
     # This has to be done as two steps or the simple API can't figure out the output name
     sample = CreateSampleWorkspace(OutputWorkspace=ws_name, Function='Flat background',
                                    NumBanks=1, BankPixelWidth=1, NumEvents=1,
                                    XMin=1, XMax=14, BinWidth=2)
     group = WorkspaceGroup()
     group.addWorkspace(sample)
     return group
Example #6
0
def add_directory_structure(dirs):
    """
    Create the nested WorkspaceGroup structure in the ADS specified by the
    stored directory attribute.

    :param dirs: ordered group names, outermost first,
        e.g. ['Muon Data', 'MUSR72105', 'MUSR72105 Raw Data']
    :raises ValueError: if the same name appears more than once
    """
    if not dirs:
        return
    if len(dirs) > len(set(dirs)):
        raise ValueError("Group names must be unique")

    # Ensure each name exists in the ADS as a WorkspaceGroup, replacing any
    # non-group workspace that currently holds the name.
    for directory in dirs:
        if AnalysisDataService.doesExist(directory):
            if isinstance(AnalysisDataService.retrieve(directory), WorkspaceGroup):
                continue  # exists and is already a workspace group
            AnalysisDataService.remove(directory)
        AnalysisDataService.addOrReplace(directory, WorkspaceGroup())

    # Create the nested group structure in the ADS: each group is added as a
    # member of its predecessor.
    previous_dir = dirs[0]
    for directory in dirs[1:]:
        parent = AnalysisDataService.retrieve(previous_dir)
        if directory not in parent:  # 'in' uses WorkspaceGroup.__contains__
            parent.add(directory)
        previous_dir = directory
Example #7
0
 def loaded_workspace_as_group(self, run):
     """Return the loaded data for *run*: a WorkspaceGroup of all periods when
     multi-period, otherwise the single underlying workspace."""
     if not self.is_multi_period():
         return self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace'][0].workspace
     group = WorkspaceGroup()
     for wrapper in self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace']:
         group.addWorkspace(wrapper.workspace)
     return group
    def test_that_can_add_workspaces_to_WorkspaceGroup_when_not_in_ADS(self):
        """Workspaces never registered in the ADS can still be grouped directly."""
        group = WorkspaceGroup()
        for _ in range(2):
            group.addWorkspace(WorkspaceFactory.create("Workspace2D", 2, 2, 2))

        self.assertEqual(group.size(), 2)
Example #9
0
    def test_that_can_add_workspaces_to_WorkspaceGroup_when_not_in_ADS(self):
        """Adding two unregistered workspaces should yield a group of size two."""
        first = WorkspaceFactory.create("Workspace2D", 2, 2, 2)
        second = WorkspaceFactory.create("Workspace2D", 2, 2, 2)

        group = WorkspaceGroup()
        group.addWorkspace(first)
        group.addWorkspace(second)

        self.assertEqual(group.size(), 2)
    def test_that_can_add_workspaces_to_WorkspaceGroup_when_in_ADS(self):
        """ADS-registered workspaces can be added to a registered group by name."""
        for ws_name in ("ws1", "ws2"):
            self.create_matrix_workspace_in_ADS(ws_name)

        group = WorkspaceGroup()
        mtd.add("group1", group)

        group.add("ws1")
        group.add("ws2")

        self.assertTrue("ws1" in mtd["group1"])
        self.assertTrue("ws2" in mtd["group1"])
Example #11
0
    def test_that_can_add_workspaces_to_WorkspaceGroup_when_in_ADS(self):
        """Adding workspaces by name should make them members of the ADS group."""
        self.create_matrix_workspace_in_ADS("ws1")
        self.create_matrix_workspace_in_ADS("ws2")

        group = WorkspaceGroup()
        mtd.add("group1", group)
        for member in ("ws1", "ws2"):
            group.add(member)

        self.assertTrue("ws1" in mtd["group1"])
        self.assertTrue("ws2" in mtd["group1"])
Example #12
0
    def set_reduced_can_workspace_on_output(self, completed_event_bundled):
        """
        Sets the reduced can workspaces on the output properties.

        The reduced can workspaces can be either LAB or HAB
        :param completed_event_bundled: a list containing output bundles
        """
        # Collect the LAB Can and HAB Can entries if they exist
        lab_groups = WorkspaceGroup()
        hab_groups = WorkspaceGroup()

        for bundle in completed_event_bundled:
            if bundle.output_bundle.data_type is not DataType.CAN:
                continue
            output_workspace = bundle.output_bundle.output_workspace
            # The output can be None when a can was never set for the reduction;
            # skip those, and skip cans already present on the ADS.
            if output_workspace is None or does_can_workspace_exist_on_ads(output_workspace):
                continue
            reduction_mode = bundle.output_bundle.reduction_mode
            if reduction_mode is ReductionMode.LAB:
                lab_groups.addWorkspace(output_workspace)
            elif reduction_mode is ReductionMode.HAB:
                hab_groups.addWorkspace(output_workspace)
            else:
                raise RuntimeError(
                    "SANSSingleReduction: The reduction mode {0} should not"
                    " be set with a can.".format(reduction_mode))

        self._set_prop_if_group_has_data("OutputWorkspaceLABCan", lab_groups)
        self._set_prop_if_group_has_data("OutputWorkspaceHABCan", hab_groups)
Example #13
0
    def create_run_workspaces(self, run):
        """Create one sample workspace per detector, group them, and register the run."""
        detectors = ['Detector 1', 'Detector 2', 'Detector 3', 'Detector 4']
        grpws = WorkspaceGroup()
        # One sample workspace per detector entry.
        for _ in detectors:
            grpws.addWorkspace(CreateSampleWorkspace())
        run_results = RunObject(run, detectors, grpws)

        self.model._loaded_data_store.add_data(run=[run], workspace=grpws)
        self.model._data_context.run_info_update(run_results)
Example #14
0
 def test_finalise_groupworkspace(self):
     """
         The test will fail if it cannot delete a workspace because it does not exist
     """
     group = WorkspaceGroup()
     group.addWorkspace(CreateSampleWorkspace())
     group_name = 'New Group Workspace'
     ws_list = [CreateSampleWorkspace() for _ in range(3)]
     load_utils_ea.finalise_groupworkspace(self.model, group, group_name,
                                           ws_list)
     self.assertTrue(AnalysisDataService.doesExist('New Group Workspace'))
Example #15
0
    def setUp(self) -> None:
        # create a mock database
        # tests save_bank_table and load_bank_table, save_manifest
        self.database_path: str = TestCorelliDatabase.test_dir.name
        date: str = datetime.now().strftime('%Y%m%d')  # format YYYYMMDD

        # (bank number, save date, rows of [detector id, calibrated value])
        bank_data = [
            (10, date, [[28672, -1.2497636826045173], [28673, -1.2462425728938251],
                        [28674, -1.2427213977528369], [28675, -1.2392001571797284]]),
            (20, date, [[28676, -1.2597636826045173], [28677, -1.2562425728938251],
                        [28678, -1.2527213977528369], [28679, -1.2492001571797284]]),
            (30, date, [[28700, -1.1511478720770645], [28701, -1.1476249296284657],
                        [28702, -1.2427213977528369], [28703, -1.2392001571797284]]),
            (40, '20200601', [[28704, -1.1611478720770645], [28705, -1.1776249296284657],  # use different date
                              [28706, -1.2827213977528369], [28707, -1.2992001571797284]]),
        ]
        for bank, save_date, rows in bank_data:
            table = init_corelli_table()
            for row in rows:
                table.addRow(row)
            save_bank_table(table, bank, self.database_path, save_date)

        # a second bank-40 table with an explicit name, saved under an older date
        named_table = init_corelli_table('calibration_' + str(40))
        for row in [[28704, -1.1711478720770645], [28705, -1.1876249296284657],
                    [28706, -1.2927213977528369], [28707, -1.3092001571797284]]:
            named_table.addRow(row)
        save_bank_table(named_table, 40, self.database_path, '20200101')  # use different date

        # placeholder to read from the database
        self.ws_group = WorkspaceGroup()
        date = datetime.now().strftime('%Y%m%d')  # format YYYYMMDD
        for bank, load_date in ((10, date), (20, date), (30, date), (40, '20200601')):
            self.ws_group.addWorkspace(load_bank_table(bank, self.database_path, load_date))
Example #16
0
    def test_grouped_workspaces_not_in_ads(self):
        """Overplotting a WorkspaceGroup not in the ADS should add one line per member."""
        fig = plt.figure()
        plt.plot([0, 1], [0, 1])

        num_plots = 3
        ws_group = WorkspaceGroup()
        for _ in range(num_plots):
            # The group keeps the clones alive; they are intentionally not in the ADS.
            ws = CloneWorkspace(self._test_ws, StoreInADS=False)
            ws_group.addWorkspace(ws)

        plot([ws_group], wksp_indices=[1], fig=fig, overplot=True)
        ax = plt.gca()
        # One line per group member plus the line plotted before the group.
        # (The original test asserted this twice in a row -- a copy-paste defect.)
        self.assertEqual(len(ws_group) + 1, len(ax.lines))
Example #17
0
def combine_loaded_runs(model, run_list):
    """
        As a result of this function there should be:-
            - a groupworkspace named after the range of runs used e.g. '1234-1237'
            - a workspace for each detector named [detector]_[groupworkspace]
                e.g. 'Detector 1_1234-1237'
    """
    # Group name spans the first to the last run, e.g. '1234-1237'.
    co_add_workspace_name = str(run_list[0]) + "-" + str(run_list[-1])
    co_add_workspace = WorkspaceGroup()
    # Both lists are appended to by add_detector_workspace_to_group:
    # detectors already co-added, and workspace names to clean up at the end.
    finished_detectors = []
    ws_remove = []
    for run in run_list:
        run_detectors = get_detectors(model, run)
        # Only process this run if it has detectors not yet co-added.
        detectors_to_check = check_for_unused_detectors(
            run_detectors, finished_detectors)
        if detectors_to_check:
            for detector in run_detectors:
                new_ws_name = detector
                new_ws = find_ws_to_use(model, run_detectors, detector, run)
                # Sum this detector's workspace over every other run that also
                # recorded the same detector.
                for current_run in run_list:
                    if current_run != run:
                        all_detectors = get_detectors(model, current_run)
                        if detector in all_detectors:
                            workspace_to_add = find_ws_to_use(
                                model, run_detectors, detector, current_run)
                            new_ws = new_ws + workspace_to_add
                add_detector_workspace_to_group(co_add_workspace, new_ws,
                                                new_ws_name, detector,
                                                finished_detectors, ws_remove)
    # Registers the group under its name and removes the intermediate workspaces.
    finalise_groupworkspace(model, co_add_workspace, co_add_workspace_name,
                            ws_remove)
Example #18
0
    def set_transmission_workspaces_on_output(self, completed_event_slices,
                                              fit_state):
        """Sets the calculated and unfitted transmission workspaces (for both
        sample and can) on the output properties.

        :param completed_event_slices: bundles holding the transmission workspaces
            for each reduction
        :param fit_state: not referenced in this method -- presumably kept for
            interface compatibility (TODO confirm against callers)
        """
        calc_can, calc_sample = WorkspaceGroup(), WorkspaceGroup()
        unfit_can, unfit_sample = WorkspaceGroup(), WorkspaceGroup()

        output_hab_or_lab = None
        for bundle in completed_event_slices:
            if output_hab_or_lab is not None and output_hab_or_lab != bundle.output_bundle.reduction_mode:
                continue  # The transmission workspace for HAB/LAB is the same, so only output one
            output_hab_or_lab = bundle.output_bundle.reduction_mode
            calculated_transmission_workspace = bundle.transmission_bundle.calculated_transmission_workspace
            unfitted_transmission_workspace = bundle.transmission_bundle.unfitted_transmission_workspace
            if bundle.transmission_bundle.data_type is DataType.CAN:
                if does_can_workspace_exist_on_ads(
                        calculated_transmission_workspace):
                    # The workspace is cloned here because the transmission runs are diagnostic output so even though
                    # the values already exist they need to be labelled separately for each reduction.
                    calculated_transmission_workspace = CloneWorkspace(
                        calculated_transmission_workspace, StoreInADS=False)
                if does_can_workspace_exist_on_ads(
                        unfitted_transmission_workspace):
                    unfitted_transmission_workspace = CloneWorkspace(
                        unfitted_transmission_workspace, StoreInADS=False)
                # A falsy (None) workspace is silently skipped.
                if calculated_transmission_workspace:
                    calc_can.addWorkspace(calculated_transmission_workspace)
                if unfitted_transmission_workspace:
                    unfit_can.addWorkspace(unfitted_transmission_workspace)

            elif bundle.transmission_bundle.data_type is DataType.SAMPLE:
                if calculated_transmission_workspace:
                    calc_sample.addWorkspace(calculated_transmission_workspace)
                if unfitted_transmission_workspace:
                    unfit_sample.addWorkspace(unfitted_transmission_workspace)
            else:
                raise RuntimeError(
                    "SANSSingleReduction: The data type {0} should be"
                    " sample or can.".format(
                        bundle.transmission_bundle.data_type))

        # Only set each output property when its group actually has members.
        self._set_prop_if_group_has_data(
            "OutputWorkspaceCalculatedTransmission", calc_sample)
        self._set_prop_if_group_has_data("OutputWorkspaceUnfittedTransmission",
                                         unfit_sample)
        self._set_prop_if_group_has_data(
            "OutputWorkspaceCalculatedTransmissionCan", calc_can)
        self._set_prop_if_group_has_data(
            "OutputWorkspaceUnfittedTransmissionCan", unfit_can)
Example #19
0
 def merge_and_crop_workspaces(self, workspaces):
     """ where workspaces is a tuple of form:
             (filepath, ws name)

         NOTE(review): the code below treats each element of *workspaces* as a
         string (indexes [0] for the detector number and calls .rsplit("_")),
         which does not match the tuple description above -- confirm the actual
         element type against callers.
     """
     workspace_name = self.getPropertyValue('GroupWorkspace')
     # detectors is a dictionary of {detector_name : [names_of_workspaces]}
     detectors = {
         f"{workspace_name}; Detector {x}": []
         for x in range(1, 5)
     }
     # fill dictionary
     for workspace in workspaces:
         # first element/character is taken as the detector number (1-4)
         detector_number = workspace[0]
         detectors[f"{workspace_name}; Detector {detector_number}"].append(
             workspace)
     # initialise a group workspace
     overall_ws = WorkspaceGroup()
     # merge each workspace list in detectors into a single workspace
     for detector, workspace_list in detectors.items():
         if workspace_list:
             # sort workspace list according to type_index
             sorted_workspace_list = [None] * NUM_FILES_PER_DETECTOR
             # sort workspace list according to type_index
             for workspace in workspace_list:
                 # the second '_'-separated token names the data type,
                 # which maps to a spectrum slot via SPECTRUM_INDEX
                 data_type = workspace.rsplit("_")[1]
                 sorted_workspace_list[SPECTRUM_INDEX[data_type] -
                                       1] = workspace
             workspace_list = sorted_workspace_list
             # create merged workspace
             merged_ws = self.create_merged_workspace(workspace_list)
             ConvertToHistogram(InputWorkspace=merged_ws,
                                OutputWorkspace=detector)
             minX, maxX = [], []
             ws = AnalysisDataService.retrieve(detector)
             for i in range(ws.getNumberHistograms()):
                 xdata = ws.readX(i)
                 minX.append(xdata[0])
                 # spectrum 2 keeps its full range; others are cropped by one
                 # x-unit at the top -- presumably to trim a trailing bin
                 # (TODO confirm why spectrum index 2 is special-cased)
                 if i == 2:
                     maxX.append(xdata[-1])
                 else:
                     maxX.append(xdata[-1] - 1)
             CropWorkspaceRagged(InputWorkspace=detector,
                                 OutputWorkspace=detector,
                                 xmin=minX,
                                 xmax=maxX)
             overall_ws.addWorkspace(AnalysisDataService.retrieve(detector))
     self.setProperty("GroupWorkspace", overall_ws)
Example #20
0
 def test_add_detector_workspace_to_group(self):
     """
         The test will fail if it cannot delete a workspace because it does not exist
     """
     group = WorkspaceGroup()
     sample_ws = CreateSampleWorkspace()
     finished_detectors, ws_list = [], []
     load_utils_ea.add_detector_workspace_to_group(group, sample_ws,
                                                   'Test Workspace Name',
                                                   'Detector 3',
                                                   finished_detectors,
                                                   ws_list)
     self.assertEqual(ws_list, ['Test Workspace Name'])
     self.assertEqual(finished_detectors, ['Detector 3'])
     self.assertEqual(group.getNames()[0], 'Test Workspace Name')
Example #21
0
    def set_output_workspaces(self,
                              workflow_outputs: SANSWorkflowAlgorithmOutputs):
        """
        Sets the output workspaces which can be HAB, LAB or Merged.

        At this step we also provide a workspace name to the sample logs which can be used later on for saving
        :param workflow_outputs:  collection of wavelength sliced and reduced workspaces
        """
        # Note that this breaks the flexibility that we have established with the reduction mode. We have not hardcoded
        # HAB or LAB anywhere which means that in the future there could be other detectors of relevance. Here we
        # reference HAB and LAB directly since we currently don't want to rely on dynamic properties. See also in PyInit

        merged = WorkspaceGroup()
        lab = WorkspaceGroup()
        hab = WorkspaceGroup()
        scaled = WorkspaceGroup()

        # Fill each group from its corresponding workflow output list.
        for group, outputs in ((lab, workflow_outputs.lab_output),
                               (hab, workflow_outputs.hab_output),
                               (merged, workflow_outputs.merged_output),
                               (scaled, workflow_outputs.scaled_hab_output)):
            for ws in outputs:
                group.addWorkspace(ws)

        self._set_prop_if_group_has_data("OutputWorkspaceLAB", lab)
        self._set_prop_if_group_has_data("OutputWorkspaceHAB", hab)
        self._set_prop_if_group_has_data("OutputWorkspaceHABScaled", scaled)
        self._set_prop_if_group_has_data("OutputWorkspaceMerged", merged)
Example #22
0
    def set_reduced_can_workspace_on_output(self, output_bundles):
        """
        Sets the reduced can group workspaces on the output properties.
        The reduced can workspaces can be:
        LAB Can or
        HAB Can

        :param output_bundles: a list of output bundles
        """
        workspace_group_lab_can = WorkspaceGroup()
        workspace_group_hab_can = WorkspaceGroup()
        # Find the LAB Can and HAB Can entries if they exist
        for component_bundle in output_bundles:
            for output_bundle in component_bundle:
                if output_bundle.data_type is DataType.Can:
                    reduction_mode = output_bundle.reduction_mode
                    output_workspace = output_bundle.output_workspace
                    # Make sure that the output workspace is not None which can be the case if there has never been a
                    # can set for the reduction.
                    if output_workspace is not None and not does_can_workspace_exist_on_ads(
                            output_workspace):
                        # Register the can on the ADS under its generated name
                        # so later reductions can reuse it.
                        name = self._get_output_workspace_name(
                            output_bundle.state,
                            output_bundle.reduction_mode,
                            can=True)
                        AnalysisDataService.addOrReplace(
                            name, output_workspace)
                        if reduction_mode is ISISReductionMode.LAB:
                            workspace_group_lab_can.addWorkspace(
                                output_workspace)
                        elif reduction_mode is ISISReductionMode.HAB:
                            workspace_group_hab_can.addWorkspace(
                                output_workspace)
                        else:
                            raise RuntimeError(
                                "SANSSingleReduction: The reduction mode {0} should not"
                                " be set with a can.".format(reduction_mode))
        if workspace_group_lab_can.size() > 0:
            # LAB group workspace is non-empty, so we want to set it as output
            self.setProperty("OutputWorkspaceLABCan", workspace_group_lab_can)
        if workspace_group_hab_can.size() > 0:
            # Same for HAB: only set the output when there is something to output
            self.setProperty("OutputWorkspaceHABCan", workspace_group_hab_can)
 def create_group_workspace_and_load(self):
     """Create a three-detector sample group for run 9999 and load it into the context."""
     grpws = WorkspaceGroup()
     for ws_name in ('9999; Detector 1', '9999; Detector 2', '9999; Detector 3'):
         grpws.addWorkspace(CreateSampleWorkspace(OutputWorkspace=ws_name))
     self.context.data_context._loaded_data.add_data(run=[9999],
                                                     workspace=grpws)
     loaded_data = self.context.data_context._loaded_data
     self.context.group_context.reset_group_to_default(loaded_data)
Example #24
0
    def set_reduced_can_count_and_norm_on_output(self, completed_event_slices):
        """
        Sets the reduced can count and norm group workspaces on the output properties.
        This includes the HAB/LAB counts and Norms

        :param completed_event_slices: a list containing a single list of output bundle parts
        """
        # Find the partial output bundles fo LAB Can and HAB Can if they exist
        lab_can_counts, hab_can_counts = WorkspaceGroup(), WorkspaceGroup()
        lab_can_norms, hab_can_norms = WorkspaceGroup(), WorkspaceGroup()

        for bundle in completed_event_slices:
            if bundle.output_bundle.data_type is not DataType.CAN:
                continue
            counts = bundle.parts_bundle.output_workspace_count
            norm = bundle.parts_bundle.output_workspace_norm
            # Outputs can be None when a can was never set for the reduction;
            # also skip partial outputs already present on the ADS.
            if norm is None or counts is None:
                continue
            if does_can_workspace_exist_on_ads(norm) or does_can_workspace_exist_on_ads(counts):
                continue
            reduction_mode = bundle.parts_bundle.reduction_mode
            if reduction_mode is ReductionMode.LAB:
                lab_can_counts.addWorkspace(counts)
                lab_can_norms.addWorkspace(norm)
            elif reduction_mode is ReductionMode.HAB:
                hab_can_counts.addWorkspace(counts)
                hab_can_norms.addWorkspace(norm)
            else:
                raise RuntimeError(
                    "SANSSingleReduction: The reduction mode {0} should not"
                    " be set with a partial can.".format(
                        reduction_mode))

        self._set_prop_if_group_has_data("OutputWorkspaceLABCanCount",
                                         lab_can_counts)
        self._set_prop_if_group_has_data("OutputWorkspaceLABCanNorm",
                                         lab_can_norms)
        self._set_prop_if_group_has_data("OutputWorkspaceHABCanCount",
                                         hab_can_counts)
        self._set_prop_if_group_has_data("OutputWorkspaceHABCanNorm",
                                         hab_can_norms)
Example #25
0
    def _create_workspace_group_to_store_combination_workspaces(self) -> "WorkspaceGroup | None":
        """Return the WorkspaceGroup used to store the parameter-combination matrix workspaces.

        Returns None when no group name is available. An existing group in the
        ADS is reused; otherwise a new group is created and registered.
        (The previous annotation claimed a non-optional WorkspaceGroup return,
        which was wrong for the no-name case.)
        """
        group_name = self.parameter_combination_group_name()
        if group_name is None:
            return None

        if check_if_workspace_exist(group_name):
            return retrieve_ws(group_name)

        workspace_group = WorkspaceGroup()
        add_ws_to_ads(group_name, workspace_group)
        return workspace_group
Example #26
0
    def set_output_workspaces(self, reduction_mode_vs_output_workspaces,
                              reduction_mode_vs_workspace_names):
        """
        Sets the output workspaces which can be HAB, LAB or Merged.

        At this step we also provide a workspace name to the sample logs which can be used later on for saving
        :param reduction_mode_vs_output_workspaces:  map from reduction mode to output workspace
        :param reduction_mode_vs_workspace_names: an unused dict. Required for version 2 compatibility
        """
        # Note that this breaks the flexibility that we have established with the reduction mode. We have not hardcoded
        # HAB or LAB anywhere which means that in the future there could be other detectors of relevance. Here we
        # reference HAB and LAB directly since we currently don't want to rely on dynamic properties. See also in PyInit

        merged = WorkspaceGroup()
        lab = WorkspaceGroup()
        hab = WorkspaceGroup()
        # Dispatch table from reduction mode to its destination group.
        destination = {ReductionMode.MERGED: merged,
                       ReductionMode.LAB: lab,
                       ReductionMode.HAB: hab}

        for reduction_mode, output_workspace_list in reduction_mode_vs_output_workspaces.items():
            for output_workspace in output_workspace_list:
                group = destination.get(reduction_mode)
                # An unknown mode only fails if it actually has workspaces to place.
                if group is None:
                    raise RuntimeError(
                        "SANSSingleReduction: Cannot set the output workspace. The selected reduction "
                        "mode {0} is unknown.".format(reduction_mode))
                group.addWorkspace(output_workspace)

        self._set_prop_if_group_has_data("OutputWorkspaceLAB", lab)
        self._set_prop_if_group_has_data("OutputWorkspaceHAB", hab)
        self._set_prop_if_group_has_data("OutputWorkspaceMerged", merged)
Example #27
0
def combine_spatial_banks(tables: WorkspaceGroup,
                          table_type: str = 'calibration',
                          name: Optional[str] = None) -> TableWorkspace:
    """
    Merge a WorkspaceGroup of independent per-bank TableWorkspace objects into one TableWorkspace.

    :param tables: input GroupWorkspace with independent bank TableWorkspace for Corelli
    :param table_type: input type of table. One of ('calibration', 'mask')
    :param name: input name for the TableWorkspace output
    :return: unified TableWorkspace for all banks
    :raises AssertionError: if the inputs are not of the expected workspace types
    :raises RuntimeError: if a member table does not have the columns expected for table_type
    """
    # Fixed: this was wrapped in a pointless f-string (f'{"..."}'); it is a constant message.
    assert isinstance(tables, WorkspaceGroup), \
        'Cannot process Corelli combine_spatial_banks, input is not of type WorkspaceGroup'

    combined_table: TableWorkspace = init_corelli_table(table_type=table_type, name=name)\
        if name else init_corelli_table(table_type=table_type)

    for i in range(tables.getNumberOfEntries()):
        table = tables.getItem(i)

        # check type (fixed: message previously mixed an f-prefix with '+ str(i)' concatenation)
        assert isinstance(table, TableWorkspace), \
            f'Cannot process Corelli combine_spatial_banks, table {i} is not of type TableWorkspace'

        # check column names (fixed: '{table_type }is' rendered e.g. "maskis not a valid ...")
        if not has_valid_columns(table, table_type=table_type):
            raise RuntimeError(f'Table index {i} of type {table_type} is not a valid Corelli TableWorkspace')

        table_dict = table.toDict()

        # e.g. 'Detector ID' and 'Detector Y Coordinate' for the 'calibration' table_type
        column_names = table.getColumnNames()
        # loop through rows; iterate 'column' so we do not shadow the 'name' parameter (previous bug)
        for r in range(table.rowCount()):
            combined_table.addRow([table_dict[column][r] for column in column_names])

    return combined_table
Example #28
0
 def set_can_and_sam_on_output(self, completed_event_slices):
     """
     Sets the reduced can and sample workspaces.
     These are the LAB/HAB can and sample groups.
     Cans are also output for optimization, so check for double output.
     :param completed_event_slices: an iterable of completed event-slice bundles; each carries an
                                    output_bundle with the reduction mode (LAB/HAB), the data type
                                    (CAN/SAMPLE) and the output workspace (may be None)
     """
     # Accumulate workspaces into four groups, one per (reduction mode, data type) combination.
     lab_cans, hab_cans = WorkspaceGroup(), WorkspaceGroup()
     lab_samples, hab_samples = WorkspaceGroup(), WorkspaceGroup()
     for bundle in completed_event_slices:
         reduction_mode = bundle.output_bundle.reduction_mode
         output_workspace = bundle.output_bundle.output_workspace
         if bundle.output_bundle.data_type is DataType.CAN:
             # Skip cans that are missing or already on the ADS (avoids double output of optimized cans).
             if output_workspace is not None and not does_can_workspace_exist_on_ads(
                     output_workspace):
                 if reduction_mode is ReductionMode.LAB:
                     lab_cans.addWorkspace(output_workspace)
                 elif reduction_mode is ReductionMode.HAB:
                     hab_cans.addWorkspace(output_workspace)
                 else:
                     raise RuntimeError(
                         "SANSSingleReduction: The reduction mode {0} should not"
                         " be set with a can.".format(reduction_mode))
         elif bundle.output_bundle.data_type is DataType.SAMPLE:
             # NOTE(review): samples reuse the can-existence ADS check here — looks intentional to
             # mirror the can branch, but confirm a sample can never be skipped incorrectly.
             if output_workspace is not None and not does_can_workspace_exist_on_ads(
                     output_workspace):
                 if reduction_mode is ReductionMode.LAB:
                     lab_samples.addWorkspace(output_workspace)
                 elif reduction_mode is ReductionMode.HAB:
                     hab_samples.addWorkspace(output_workspace)
                 else:
                     raise RuntimeError(
                         "SANSSingleReduction: The reduction mode {0} should not"
                         " be set with a sample.".format(reduction_mode))
     # Only set output properties for groups that actually received workspaces.
     self._set_prop_if_group_has_data("OutputWorkspaceLABCan", lab_cans)
     self._set_prop_if_group_has_data("OutputWorkspaceHABCan", hab_cans)
     self._set_prop_if_group_has_data("OutputWorkspaceLABSample",
                                      lab_samples)
     self._set_prop_if_group_has_data("OutputWorkspaceHABSample",
                                      hab_samples)
Example #29
0
    def test_find_ws_to_use(self):
        """Checks that find_ws_to_use picks the group member matching the requested detector."""
        run = 5555
        detector = 'Detector 3'
        run_detectors = [
            'Detector 1', 'Detector 2', 'Detector 3', 'Detector 4'
        ]

        # Build a group of four sample workspaces, one per detector in run_detectors.
        # NOTE(review): the final assertion relies on the output workspace being named after the
        # assignment target (e.g. 'ws_detector3'), so these variables must not be renamed — confirm
        # this simpleapi naming behaviour before restructuring.
        grpws = WorkspaceGroup()
        ws_detector1 = CreateSampleWorkspace()
        grpws.addWorkspace(ws_detector1)
        ws_detector2 = CreateSampleWorkspace()
        grpws.addWorkspace(ws_detector2)
        ws_detector3 = CreateSampleWorkspace()
        grpws.addWorkspace(ws_detector3)
        ws_detector4 = CreateSampleWorkspace()
        grpws.addWorkspace(ws_detector4)

        self.model._loaded_data_store.add_data(run=[run], workspace=grpws)

        ws = load_utils_ea.find_ws_to_use(self.model, run_detectors, detector,
                                          run)
        # 'Detector 3' is the third entry, so the third workspace should be returned.
        self.assertEqual(ws.name(), 'ws_detector3')
Example #30
0
 def create_group_workspace_and_load(self):
     """Builds a two-detector workspace group for run 9999 and registers it with the loaded data."""
     grpws = WorkspaceGroup()
     # Each member is named explicitly via OutputWorkspace, one per detector.
     for detector_ws_name in ('9999; Detector 1', '9999; Detector 2'):
         grpws.addWorkspace(
             CreateSampleWorkspace(OutputWorkspace=detector_ws_name))
     self.loadedData.add_data(run=[9999], workspace=grpws)
Example #31
0
    def add_copy_to_ads(self) -> None:
        """Adds the output workspaces for this fit to the ADS. Must be a copy so the fit history remains a history."""
        group = WorkspaceGroup()
        add_ws_to_ads(self.output_group_name(), group)

        # Copy every fit artefact into the ADS and collect it under the group, in the same order
        # as before: the per-fit outputs, then the parameter table, then the covariance matrix.
        for wrapper in (*self.output_workspaces, self.parameter_workspace,
                        self.covariance_workspace):
            add_ws_to_ads(wrapper.workspace_name, wrapper.workspace_copy())
            group.add(wrapper.workspace_name)
def create_test_workspacegroup(group_name=None, size=None, items=None):
    """Creates a WorkspaceGroup on the ADS, populated either with `size` fake
    workspaces or with the given `items` (mutually exclusive)."""
    if size is not None and items is not None:
        raise ValueError("Provide either size or items not both.")

    # Fall back to a fixed default name when none is supplied.
    if group_name is None:
        group_name = 'fitting_context_testgroup'

    group = WorkspaceGroup()
    if size is not None:
        # Generate `size` fake workspaces named after the group.
        for index in range(size):
            group.addWorkspace(
                create_test_workspace('{}_{}'.format(group_name, index)))
    elif items is not None:
        for workspace in items:
            group.addWorkspace(workspace)

    AnalysisDataService.Instance().addOrReplace(group_name, group)
    return group