Example #1
    def _perform_initial_move(self, workspaces, state):
        move_name = "SANSMove"
        state_dict = state.property_manager

        zero_options = {"SANSState": state_dict,
                        "MoveType": "SetToZero",
                        "Component": ""}
        zero_alg = create_child_algorithm(self, move_name, **zero_options)

        move_options = {"SANSState": state_dict,
                        "MoveType": "InitialMove"}
        move_alg = create_child_algorithm(self, move_name, **move_options)

        # The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
        for key, workspace_list in workspaces.items():
            if SANSDataType.to_string(key) in ("SampleTransmission", "CanTransmission", "CanDirect", "SampleDirect"):
                is_trans = True
            else:
                is_trans = False
            move_alg.setProperty("IsTransmissionWorkspace", is_trans)
            for workspace in workspace_list:
                zero_alg.setProperty("Workspace", workspace)
                zero_alg.execute()
                zeroed_workspace = zero_alg.getProperty("Workspace").value

                # If beam centre was specified then use it
                beam_coordinates = self.getProperty("BeamCoordinates").value
                if beam_coordinates:
                    move_alg.setProperty("BeamCoordinates", beam_coordinates)

                # ZOOM and LARMOR only have LAB, SANS2D and LOQ move both at once.
                move_alg.setProperty("Component", "LAB")
                move_alg.setProperty("Workspace", zeroed_workspace)
                move_alg.execute()
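
For orientation, a minimal sketch of the mapping this method expects, assuming the SANSDataType enum used above is importable; the workspace variable names and the helper itself are purely illustrative and not part of the original source:

# Hypothetical input shape for _perform_initial_move: SANSDataType keys mapped to
# lists of loaded workspaces (single-element lists in the simplest case).
def build_workspace_map(sample_scatter_ws, sample_transmission_ws, sample_direct_ws):
    return {SANSDataType.SampleScatter: [sample_scatter_ws],
            SANSDataType.SampleTransmission: [sample_transmission_ws],
            SANSDataType.SampleDirect: [sample_direct_ws]}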
Example #2
    def _perform_initial_move(self, workspaces, state):
        move_name = "SANSMove"
        state_dict = state.property_manager

        zero_options = {"SANSState": state_dict,
                        "MoveType": "SetToZero",
                        "Component": ""}
        zero_alg = create_child_algorithm(self, move_name, **zero_options)

        move_options = {"SANSState": state_dict,
                        "MoveType": "InitialMove"}
        move_alg = create_child_algorithm(self, move_name, **move_options)

        # The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
        for key, workspace_list in list(workspaces.items()):
            if SANSDataType.to_string(key) in ("SampleTransmission", "CanTransmission", "CanDirect", "SampleDirect"):
                is_trans = True
            else:
                is_trans = False
            move_alg.setProperty("IsTransmissionWorkspace", is_trans)
            for workspace in workspace_list:
                zero_alg.setProperty("Workspace", workspace)
                zero_alg.execute()
                zeroed_workspace = zero_alg.getProperty("Workspace").value

                # If beam centre was specified then use it
                beam_coordinates = self.getProperty("BeamCoordinates").value
                if beam_coordinates:
                    move_alg.setProperty("BeamCoordinates", beam_coordinates)

                # ZOOM and LARMOR only have LAB, SANS2D and LOQ move both at once.
                move_alg.setProperty("Component", "LAB")
                move_alg.setProperty("Workspace", zeroed_workspace)
                move_alg.execute()
    def _check_compatibility_mode(self, workspace, monitor_workspace, compatibility):
        is_event_workspace = isinstance(workspace, IEventWorkspace)
        use_dummy_workspace = False
        dummy_mask_workspace = None
        if is_event_workspace:
            if compatibility.use_compatibility_mode:
                # We convert the workspace here to a histogram workspace, since we cannot otherwise
                # compare the results between the old and the new reduction workspace in a meaningful manner.
                # The old one is histogram and the new one is event.
                # Rebin to monitor workspace
                if compatibility.time_rebin_string:
                    rebin_name = "Rebin"
                    rebin_option = {"InputWorkspace": workspace,
                                    "Params": compatibility.time_rebin_string,
                                    "OutputWorkspace": EMPTY_NAME,
                                    "PreserveEvents": False}
                    rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                    rebin_alg.execute()
                    workspace = rebin_alg.getProperty("OutputWorkspace").value
                else:
                    rebin_name = "RebinToWorkspace"
                    rebin_option = {"WorkspaceToRebin": workspace,
                                    "WorkspaceToMatch": monitor_workspace,
                                    "OutputWorkspace": EMPTY_NAME,
                                    "PreserveEvents": False}
                    rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                    rebin_alg.execute()
                    workspace = rebin_alg.getProperty("OutputWorkspace").value
            else:
                # If not using compatibility mode, we create a histogram from the workspace, which will store
                # the bin masking.
                use_dummy_workspace = True

                # Extract only a single spectrum so the dummy workspace which contains the bin masks is as small
                # as possible (cheaper operations). This is fine because we only care about the mask flags in this
                # workspace, not the y data.
                extract_spectrum_name = "ExtractSingleSpectrum"
                extract_spectrum_option = {"InputWorkspace": workspace,
                                           "OutputWorkspace": "dummy_mask_workspace",
                                           "WorkspaceIndex": 0}
                extract_spectrum_alg = create_child_algorithm(self, extract_spectrum_name, **extract_spectrum_option)
                extract_spectrum_alg.execute()
                dummy_mask_workspace = extract_spectrum_alg.getProperty("OutputWorkspace").value

                rebin_name = "RebinToWorkspace"
                rebin_option = {"WorkspaceToRebin": dummy_mask_workspace,
                                "WorkspaceToMatch": monitor_workspace,
                                "OutputWorkspace": "dummy_mask_workspace",
                                "PreserveEvents": False}
                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                rebin_alg.execute()
                dummy_mask_workspace = rebin_alg.getProperty("OutputWorkspace").value
        return workspace, dummy_mask_workspace, use_dummy_workspace
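
How the returned triple is consumed is not shown here; the sketch below is an assumed call pattern (not the original source), pairing it with a _copy_bin_masks helper like the one that appears further down in this listing:

# Assumed usage sketch: when compatibility mode is off, the dummy workspace carries the
# bin masks through the reduction and they are copied back onto the result at the end.
def reduce_with_compatibility_check(reducer, workspace, monitor_workspace, compatibility):
    workspace, dummy_mask_ws, use_dummy = reducer._check_compatibility_mode(workspace, monitor_workspace,
                                                                            compatibility)
    # ... the main reduction steps operate on `workspace` here ...
    if use_dummy:
        workspace = reducer._copy_bin_masks(workspace, dummy_mask_ws)
    return workspace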
    def _convert_to_wavelength(self, workspace, wavelength_state) -> WsList:
        wavelength_name = "SANSConvertToWavelengthAndRebin"

        processed = {}
        assert (len(wavelength_state.wavelength_interval.selected_ranges) > 0)
        for wav_range in wavelength_state.wavelength_interval.selected_ranges:
            wavelength_options = {"InputWorkspace": workspace,
                                  "OutputWorkspace": EMPTY_NAME,
                                  "WavelengthLow": wav_range[0],
                                  "WavelengthHigh": wav_range[1],
                                  "WavelengthStep": wavelength_state.wavelength_interval.wavelength_step,
                                  "WavelengthStepType": wavelength_state.wavelength_step_type_lin_log.value,
                                  "RebinMode": wavelength_state.rebin_type.value}

            wavelength_alg = create_child_algorithm(self, wavelength_name,
                                                    **wavelength_options)
            wavelength_alg.execute()
            # The JSON serialiser will convert tuples->lists which are unhashable, so cast back
            processed[tuple(wav_range)] = wavelength_alg.getProperty(
                "OutputWorkspace").value
        return processed
Example #5
def apply_missing_parameters(calibration_workspace, workspace, missing_parameters, parent_alg):
    """
    Transfers missing properties from the data workspace to the calibration workspace.

    :param calibration_workspace: the calibration workspace.
    :param workspace: the data workspace.
    :param missing_parameters: a list of missing parameters which exist on the data workspace but not on the calibration
                               workspace.
    :param parent_alg: a handle to the parent algorithm
    """
    instrument = workspace.getInstrument()
    component_name = instrument.getName()
    component_name = sanitise_instrument_name(component_name)
    set_instrument_name = "SetInstrumentParameter"
    set_instrument_parameter_options = {"Workspace": calibration_workspace,
                                        "ComponentName": component_name}
    alg = create_child_algorithm(parent_alg, set_instrument_name, **set_instrument_parameter_options)

    # For now only string, int and double are handled
    type_options = {"string": "String", "int": "Number", "double": "Number"}
    value_options = {"string": instrument.getStringParameter,
                     "int": instrument.getIntParameter,
                     "double": instrument.getNumberParameter}
    try:
        for missing_parameter in missing_parameters:
            parameter_type = instrument.getParameterType(missing_parameter)
            type_to_save = type_options[parameter_type]
            value = value_options[parameter_type](missing_parameter)

            alg.setProperty("ParameterName", missing_parameter)
            alg.setProperty("ParameterType", type_to_save)
            alg.setProperty("Value", str(value[0]))
            alg.execute()
    except KeyError:
        raise RuntimeError("SANSCalibration: An Instrument Parameter File value of unknown type "
                           "was going to be copied. Cannot handle this currently.")
Example #6
    def correct(self, workspaces, parent_alg):
        """
        For LOQ we want to apply a different instrument definition for the transmission runs.

        :param workspaces: a dictionary mapping data types (e.g. SampleScatter) to workspaces
        :param parent_alg: a handle to the parent algorithm
        """
        # Get the transmission and the direct workspaces and apply the correction to them
        workspace_which_require_transmission_correction = []
        for data_type, _ in list(workspaces.items()):
            if is_transmission_type(data_type):
                workspace_which_require_transmission_correction.append(workspaces[data_type])

        # We want to apply a different instrument for the transmission runs

        for workspace in workspace_which_require_transmission_correction:
            assert len(workspace) == 1
            workspace = workspace[0]
            instrument = workspace.getInstrument()
            has_m4 = instrument.getComponentByName("monitor4")
            if has_m4 is None:
                trans_definition_file = os.path.join(config.getString('instrumentDefinition.directory'),
                                                     'LOQ_trans_Definition.xml')
            else:
                trans_definition_file = os.path.join(config.getString('instrumentDefinition.directory'),
                                                     'LOQ_trans_Definition_M4.xml')
            # Done
            instrument_name = "LoadInstrument"
            instrument_options = {"Workspace": workspace,
                                  "Filename": trans_definition_file,
                                  "RewriteSpectraMap": False}
            instrument_alg = create_child_algorithm(parent_alg, instrument_name, **instrument_options)
            instrument_alg.execute()
    def _get_cloned_workspace(self, workspace):
        clone_name = "CloneWorkspace"
        clone_options = {"InputWorkspace": workspace,
                         "OutputWorkspace": EMPTY_NAME}
        clone_alg = create_child_algorithm(self, clone_name, **clone_options)
        clone_alg.execute()
        return clone_alg.getProperty("OutputWorkspace").value
    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
        """
        Merges two partial reductions to obtain a merged reduction.

        :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundle map
        :param parent_alg: a handle to the parent algorithm.
        :return: a MergeBundle which contains the merged workspace.
        """
        # Get the primary and secondary detectors for stitching. This is normally LAB and HAB, but in other scenarios
        # there might be completely different detectors. This approach allows future adjustments to the stitching
        # configuration. The data from the secondary detector will be stitched to the data from the primary detector.

        primary_detector, secondary_detector = get_detectors_for_merge(reduction_mode_vs_output_bundles)
        sample_count_primary, sample_norm_primary, sample_count_secondary, sample_norm_secondary = \
            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_sample)

        # Get the corresponding partial can workspaces from the reduction settings.
        can_count_primary, can_norm_primary, can_count_secondary, can_norm_secondary = \
            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_can)

        # Get fit parameters
        shift_factor, scale_factor, fit_mode = get_shift_and_scale_parameter(reduction_mode_vs_output_bundles)
        fit_mode_as_string = FitModeForMerge.to_string(fit_mode)

        # We need to convert NoFit to None.
        if fit_mode_as_string == "NoFit":
            fit_mode_as_string = "None"

        # Run the SANSStitch algorithm
        stitch_name = "SANSStitch"
        stitch_options = {"HABCountsSample": sample_count_secondary,
                          "HABNormSample": sample_norm_secondary,
                          "LABCountsSample": sample_count_primary,
                          "LABNormSample": sample_norm_primary,
                          "ProcessCan": False,
                          "Mode": fit_mode_as_string,
                          "ScaleFactor": scale_factor,
                          "ShiftFactor": shift_factor,
                          "OutputWorkspace": "dummy"}

        if can_count_primary is not None and can_norm_primary is not None \
                and can_count_secondary is not None and can_norm_secondary is not None:
            stitch_options_can = {"HABCountsCan": can_count_secondary,
                                  "HABNormCan": can_norm_secondary,
                                  "LABCountsCan": can_count_primary,
                                  "LABNormCan": can_norm_primary,
                                  "ProcessCan": True}
            stitch_options.update(stitch_options_can)

        stitch_alg = create_child_algorithm(parent_alg, stitch_name, **stitch_options)
        stitch_alg.execute()

        # Get the fit values
        shift_from_alg = stitch_alg.getProperty("OutShiftFactor").value
        scale_from_alg = stitch_alg.getProperty("OutScaleFactor").value
        merged_workspace = stitch_alg.getProperty("OutputWorkspace").value

        # Return a merge bundle with the merged workspace and the fitted scale and shift factors (they are useful
        # diagnostic tools for the instrument scientists).
        return MergeBundle(merged_workspace=merged_workspace, shift=shift_from_alg, scale=scale_from_alg)
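
A short, hypothetical example of consuming the returned bundle; the attribute names are taken from the constructor call above, and the merger object is assumed:

from mantid.kernel import logger

# Illustrative only: read back the stitched workspace and the fitted factors.
bundle = merger.merge(reduction_mode_vs_output_bundles, parent_alg)
merged_workspace = bundle.merged_workspace
logger.notice("Fitted shift = {0}, scale = {1}".format(bundle.shift, bundle.scale))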
    def _convert_to_q(self, state_serialized, workspace, wavelength_adjustment_workspace, pixel_adjustment_workspace,
                      wavelength_and_pixel_adjustment_workspace):
        """
        A conversion to momentum transfer is performed in this step.

        The conversion can be either to the modulus of Q in which case the output is a 1D workspace, or it can
        be a 2D reduction where the y axis is Qy, i.e. it is a numeric axis.
        @param state_serialized: a serialized SANSState object
        @param workspace: the workspace to convert to momentum transfer.
        @param wavelength_adjustment_workspace: the wavelength adjustment workspace.
        @param pixel_adjustment_workspace: the pixel adjustment workspace.
        @param wavelength_and_pixel_adjustment_workspace: the wavelength and pixel adjustment workspace.
        @return: a reduced workspace
        """
        convert_name = "SANSConvertToQ"
        convert_options = {"InputWorkspace": workspace,
                           "OutputWorkspace": EMPTY_NAME,
                           "SANSState": state_serialized,
                           "OutputParts": True}
        if wavelength_adjustment_workspace:
            convert_options.update({"InputWorkspaceWavelengthAdjustment": wavelength_adjustment_workspace})
        if pixel_adjustment_workspace:
            convert_options.update({"InputWorkspacePixelAdjustment": pixel_adjustment_workspace})
        if wavelength_and_pixel_adjustment_workspace:
            convert_options.update({"InputWorkspaceWavelengthAndPixelAdjustment":
                                    wavelength_and_pixel_adjustment_workspace})
        convert_alg = create_child_algorithm(self, convert_name, **convert_options)
        convert_alg.execute()
        data_workspace = convert_alg.getProperty("OutputWorkspace").value
        sum_of_counts = convert_alg.getProperty("SumOfCounts").value
        sum_of_norms = convert_alg.getProperty("SumOfNormFactors").value
        return data_workspace, sum_of_counts, sum_of_norms
    def _adjustment(self, state_serialized, workspace, monitor_workspace, component_as_string, data_type):
        transmission_workspace = self._get_transmission_workspace()
        direct_workspace = self._get_direct_workspace()

        adjustment_name = "SANSCreateAdjustmentWorkspaces"
        adjustment_options = {"SANSState": state_serialized,
                              "Component": component_as_string,
                              "DataType": data_type,
                              "MonitorWorkspace": monitor_workspace,
                              "SampleData": workspace,
                              "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
                              "OutputWorkspacePixelAdjustment": EMPTY_NAME,
                              "OutputWorkspaceWavelengthAndPixelAdjustment": EMPTY_NAME}
        if transmission_workspace:
            transmission_workspace = self._move(state_serialized, transmission_workspace, component_as_string,
                                                is_transmission=True)
            adjustment_options.update({"TransmissionWorkspace": transmission_workspace})

        if direct_workspace:
            direct_workspace = self._move(state_serialized, direct_workspace, component_as_string, is_transmission=True)
            adjustment_options.update({"DirectWorkspace": direct_workspace})

        adjustment_alg = create_child_algorithm(self, adjustment_name, **adjustment_options)
        adjustment_alg.execute()

        wavelength_adjustment = adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
        pixel_adjustment = adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
        wavelength_and_pixel_adjustment = adjustment_alg.getProperty(
                                           "OutputWorkspaceWavelengthAndPixelAdjustment").value
        calculated_transmission_workspace = adjustment_alg.getProperty("CalculatedTransmissionWorkspace").value
        unfitted_transmission_workspace = adjustment_alg.getProperty("UnfittedTransmissionWorkspace").value
        return wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment, \
            calculated_transmission_workspace, unfitted_transmission_workspace
Example #11
def perform_can_subtraction(sample, can, parent_alg):
    """
    Subtracts the can from the sample workspace.

    We need to manually take care of the q resolution issue here.
    :param sample: the sample workspace
    :param can: the can workspace.
    :param parent_alg: a handle to the parent algorithm.
    :return: the subtracted workspace.
    """
    subtraction_name = "Minus"
    subtraction_options = {
        "LHSWorkspace": sample,
        "RHSWorkspace": can,
        "OutputWorkspace": EMPTY_NAME
    }
    subtraction_alg = create_child_algorithm(parent_alg, subtraction_name,
                                             **subtraction_options)
    subtraction_alg.execute()
    output_workspace = subtraction_alg.getProperty("OutputWorkspace").value

    # If the workspace is 1D and contains Q resolution (i.e. DX values), then we need to make sure that the
    # resulting output workspace contains the correct values
    correct_q_resolution_for_can(sample, can, output_workspace)

    return output_workspace
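
correct_q_resolution_for_can is not shown in this listing; the sketch below only illustrates the assumed idea (it is not the original implementation): carry the sample's DX (Q resolution) values over to the subtracted output when the 1D inputs provide them.

# Assumed-behaviour sketch: copy the sample's Q resolution (DX values) onto the result.
def copy_q_resolution(sample, output_workspace):
    if sample.getNumberHistograms() == 1 and sample.hasDx(0):
        output_workspace.setDx(0, sample.readDx(0))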
Example #12
    def _convert_to_wavelength(self, state, workspace):
        wavelength_state = state.wavelength
        wavelength_range = wavelength_state.wavelength_interval.wavelength_full_range

        wavelength_name = "SANSConvertToWavelengthAndRebin"
        wavelength_options = {"InputWorkspace": workspace,
                              "OutputWorkspace": EMPTY_NAME,
                              "WavelengthPairs": json.dumps([(wavelength_range[0], wavelength_range[1])]),
                              "WavelengthStep": wavelength_state.wavelength_interval.wavelength_step,
                              "WavelengthStepType": wavelength_state.wavelength_step_type_lin_log.value,
                              # Non monitor/transmission data does not support interpolating rebin
                              "RebinMode": RebinType.REBIN.value}

        wavelength_alg = create_child_algorithm(self, wavelength_name,
                                                **wavelength_options)
        wavelength_alg.execute()
        grouped_ws = wavelength_alg.getProperty("OutputWorkspace").value
        return grouped_ws.getItem(0)
Example #14
    def _convert_to_wavelength(self, workspace, wavelength_state) -> WsList:
        wavelength_name = "SANSConvertToWavelengthAndRebin"
        selected_ranges = wavelength_state.wavelength_interval.selected_ranges
        assert (len(selected_ranges) > 0)
        wavelength_options = {"InputWorkspace": workspace,
                              "OutputWorkspace": EMPTY_NAME,
                              "WavelengthPairs": json.dumps(selected_ranges),
                              "WavelengthStep": wavelength_state.wavelength_interval.wavelength_step,
                              "WavelengthStepType": wavelength_state.wavelength_step_type_lin_log.value,
                              # No option for interpolating data is available
                              "RebinMode": RebinType.REBIN.value}

        wavelength_alg = create_child_algorithm(self, wavelength_name,
                                                **wavelength_options)
        wavelength_alg.execute()
        grouped_ws = wavelength_alg.getProperty("OutputWorkspace").value
        assert (len(grouped_ws) == len(selected_ranges))
        processed = {
            tuple(wav_range): ws
            for wav_range, ws in zip(selected_ranges, grouped_ws)
        }
        return processed
Example #15
def extract_multi_period_event_workspace(loader, index,
                                         output_workspace_property_name,
                                         parent_alg):
    """
    Extract a single workspace from a WorkspaceGroup.

    Note that we need to perform a CloneWorkspace operation because this is the only way to get an individual workspace
    from a WorkspaceGroup. They are extremely "sticky" and using the indexed access will only provide a weak pointer
    which means that we will have a dead reference once the WorkspaceGroup goes out of scope.
    :param loader: an executed LoadEventNexus algorithm
    :param index: an index into the WorkspaceGroup; note that it is offset by 1
    :param output_workspace_property_name: the name of the output workspace property, i.e. OutputWorkspace or
                                           MonitorWorkspace
    :param parent_alg: a handle to the parent algorithm
    :return: a single workspace
    """
    group_workspace = loader.getProperty(output_workspace_property_name).value
    group_workspace_index = index - 1
    workspace_of_interest = group_workspace[group_workspace_index]

    clone_name = "CloneWorkspace"
    clone_options = {
        "InputWorkspace": workspace_of_interest,
        "OutputWorkspace": EMPTY_NAME
    }
    clone_alg = create_child_algorithm(parent_alg, clone_name, **clone_options)
    clone_alg.execute()
    return clone_alg.getProperty("OutputWorkspace").value
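
A hypothetical usage example; the loader and parent_alg handles are assumed. Note the 1-based index, which the helper converts to a 0-based group index internally:

# Illustrative only: pull the first period's data and monitors from an executed loader.
first_period = extract_multi_period_event_workspace(loader, 1, "OutputWorkspace", parent_alg)
first_monitors = extract_multi_period_event_workspace(loader, 1, "MonitorWorkspace", parent_alg)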
    def _copy_bin_masks(self, workspace, dummy_workspace):
        mask_options = {"InputWorkspace": workspace,
                        "MaskedWorkspace": dummy_workspace,
                        "OutputWorkspace": EMPTY_NAME}
        mask_alg = create_child_algorithm(self, "MaskBinsFromWorkspace", **mask_options)
        mask_alg.execute()
        return mask_alg.getProperty("OutputWorkspace").value
Example #17
    def correct(self, workspaces, parent_alg):
        """
        For LOQ we want to apply a different instrument definition for the transmission runs.

        :param workspaces: a dictionary mapping data types (e.g. SampleScatter) to workspaces
        :param parent_alg: a handle to the parent algorithm
        """
        # Get the transmission and the direct workspaces and apply the correction to them
        workspace_which_require_transmission_correction = []
        for data_type, _ in list(workspaces.items()):
            if is_transmission_type(data_type):
                workspace_which_require_transmission_correction.append(workspaces[data_type])

        # We want to apply a different instrument for the transmission runs

        for workspace in workspace_which_require_transmission_correction:
            assert len(workspace) == 1
            workspace = workspace[0]
            instrument = workspace.getInstrument()
            has_m4 = instrument.getComponentByName("monitor4")
            if has_m4 is None:
                trans_definition_file = os.path.join(config.getString('instrumentDefinition.directory'),
                                                     'LOQ_trans_Definition.xml')
            else:
                trans_definition_file = os.path.join(config.getString('instrumentDefinition.directory'),
                                                     'LOQ_trans_Definition_M4.xml')
            # Done
            instrument_name = "LoadInstrument"
            instrument_options = {"Workspace": workspace,
                                  "Filename": trans_definition_file,
                                  "RewriteSpectraMap": False}
            instrument_alg = create_child_algorithm(parent_alg, instrument_name, **instrument_options)
            instrument_alg.execute()
Example #18
def loader_for_added_isis_nexus(file_information, is_transmission, period,
                                parent_alg):
    """
    Get the name and options for the load algorithm for ISIS nexus.

    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the current file corresponds to transmission data
    :param period: the period to load
    :param parent_alg: a handle to the parent algorithm
    :return: the name of the load algorithm and the selected load options
    """
    _ = is_transmission  # noqa
    loader_name = "LoadNexusProcessed"
    loader_options = {
        "Filename": file_information.get_file_name(),
        "OutputWorkspace": EMPTY_NAME,
        "LoadHistory": True,
        "FastMultiPeriod": True
    }
    if period != StateData.ALL_PERIODS:
        loader_options.update({"EntryNumber": period})
    loader_alg = create_child_algorithm(parent_alg, loader_name,
                                        **loader_options)
    return run_added_loader(loader_alg, file_information, is_transmission,
                            period, parent_alg)
Example #19
def get_calibration_workspace(full_file_path, use_loaded, parent_alg):
    """
    Load the calibration workspace from the specified file

    :param full_file_path: Path to the calibration file.
    :param use_loaded: Allows us to check for the calibration file on the ADS.
    :param parent_alg: a handle to the parent algorithm
    :return: the calibration workspace.
    """
    calibration_workspace = None
    # Here we can avoid reloading of the calibration workspace
    if use_loaded:
        calibration_workspace = get_already_loaded_calibration_workspace(full_file_path)

    if calibration_workspace is None:
        if not isfile(full_file_path):
            raise RuntimeError("SANSCalibration: The file for {0} does not seem to exist".format(full_file_path))
        loader_name = "LoadNexusProcessed"
        loader_options = {"Filename": full_file_path,
                          "OutputWorkspace": "dummy"}
        loader = create_child_algorithm(parent_alg, loader_name, **loader_options)
        loader.execute()
        calibration_workspace = loader.getProperty("OutputWorkspace").value

    return calibration_workspace
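
get_already_loaded_calibration_workspace is not shown here; below is a minimal sketch of the ADS lookup it presumably performs, assuming the workspace is named after the calibration file's base name (both the helper name and the naming convention are assumptions):

import os
from mantid.api import AnalysisDataService

# Hypothetical sketch: reuse a calibration workspace that is already on the ADS.
def lookup_calibration_on_ads(full_file_path):
    candidate_name = os.path.splitext(os.path.basename(full_file_path))[0]
    if AnalysisDataService.doesExist(candidate_name):
        return AnalysisDataService.retrieve(candidate_name)
    return None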
Example #20
    def _run_quartile_reduction(self, scatter_workspace, transmission_workspace, direct_workspace, data_type,
                                scatter_monitor_workspace, component, state, centre1, centre2, r_min, r_max):
        algorithm_name = "SANSBeamCentreFinderCore"
        alg_options = {"ScatterWorkspace": scatter_workspace,
                       "ScatterMonitorWorkspace": scatter_monitor_workspace,
                       "TransmissionWorkspace": transmission_workspace,
                       "DirectWorkspace": direct_workspace,
                       "Component": component,
                       "SANSState": state,
                       "DataType": data_type,
                       "Centre1": centre1,
                       "Centre2": centre2,
                       "OutputWorkspaceLeft": EMPTY_NAME,
                       "OutputWorkspaceRight": EMPTY_NAME,
                       "OutputWorkspaceTop": EMPTY_NAME,
                       "OutputWorkspaceBottom": EMPTY_NAME,
                       "RMax": r_max,
                       "RMin": r_min}
        alg = create_child_algorithm(self, algorithm_name, **alg_options)
        alg.execute()
        out_left = strip_end_nans(alg.getProperty("OutputWorkspaceLeft").value, self)
        out_right = strip_end_nans(alg.getProperty("OutputWorkspaceRight").value, self)
        out_top = strip_end_nans(alg.getProperty("OutputWorkspaceTop").value, self)
        out_bottom = strip_end_nans(alg.getProperty("OutputWorkspaceBottom").value, self)
        return {MaskingQuadrant.Left: out_left, MaskingQuadrant.Right: out_right, MaskingQuadrant.Top: out_top,
                MaskingQuadrant.Bottom: out_bottom}
Example #22
def get_calibration_workspace(full_file_path, use_loaded, parent_alg):
    """
    Load the calibration workspace from the specified file

    :param full_file_path: Path to the calibration file.
    :param use_loaded: Allows us to check for the calibration file on the ADS.
    :param parent_alg: a handle to the parent algorithm
    :return: the calibration workspace.
    """
    calibration_workspace = None
    # Here we can avoid reloading of the calibration workspace
    if use_loaded:
        calibration_workspace = get_already_loaded_calibration_workspace(
            full_file_path)

    if calibration_workspace is None:
        if not isfile(full_file_path):
            raise RuntimeError(
                "SANSCalibration: The file for {0} does not seem to exist".
                format(full_file_path))
        loader_name = "LoadNexusProcessed"
        loader_options = {
            "Filename": full_file_path,
            "OutputWorkspace": "dummy"
        }
        loader = create_child_algorithm(parent_alg, loader_name,
                                        **loader_options)
        loader.execute()
        calibration_workspace = loader.getProperty("OutputWorkspace").value

    return calibration_workspace
    def _adjustment(self, state_serialized, workspace, monitor_workspace, component_as_string, data_type):
        transmission_workspace = self._get_transmission_workspace()
        direct_workspace = self._get_direct_workspace()

        adjustment_name = "SANSCreateAdjustmentWorkspaces"
        adjustment_options = {"SANSState": state_serialized,
                              "Component": component_as_string,
                              "DataType": data_type,
                              "MonitorWorkspace": monitor_workspace,
                              "SampleData": workspace,
                              "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
                              "OutputWorkspacePixelAdjustment": EMPTY_NAME,
                              "OutputWorkspaceWavelengthAndPixelAdjustment": EMPTY_NAME}
        if transmission_workspace:
            transmission_workspace = self._move(state_serialized, transmission_workspace, component_as_string,
                                                is_transmission=True)
            adjustment_options.update({"TransmissionWorkspace": transmission_workspace})

        if direct_workspace:
            direct_workspace = self._move(state_serialized, direct_workspace, component_as_string, is_transmission=True)
            adjustment_options.update({"DirectWorkspace": direct_workspace})

        adjustment_alg = create_child_algorithm(self, adjustment_name, **adjustment_options)
        adjustment_alg.execute()

        wavelength_adjustment = adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
        pixel_adjustment = adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
        wavelength_and_pixel_adjustment = adjustment_alg.getProperty(
                                           "OutputWorkspaceWavelengthAndPixelAdjustment").value
        return wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment
Example #24
    def _move(self,
              state_serialized,
              workspace,
              component,
              is_transmission=False):
        # First we set the workspace to zero, since it might have been moved around by the user in the ADS
        # Second we use the initial move to bring the workspace into the correct position
        move_name = "SANSMove"
        move_options = {
            "SANSState": state_serialized,
            "Workspace": workspace,
            "MoveType": "SetToZero",
            "Component": ""
        }
        move_alg = create_child_algorithm(self, move_name, **move_options)
        move_alg.execute()
        workspace = move_alg.getProperty("Workspace").value

        # Do the initial move
        move_alg.setProperty("MoveType", "InitialMove")
        move_alg.setProperty("Component", component)
        move_alg.setProperty("Workspace", workspace)
        move_alg.setProperty("IsTransmissionWorkspace", is_transmission)
        move_alg.execute()
        return move_alg.getProperty("Workspace").value
Example #25
def apply_missing_parameters(calibration_workspace, workspace, missing_parameters, parent_alg):
    """
    Transfers missing properties from the data workspace to the calibration workspace.

    :param calibration_workspace: the calibration workspace.
    :param workspace: the data workspace.
    :param missing_parameters: a list of missing parameters which exist on the data workspace but not on the calibration
                               workspace.
    :param parent_alg: a handle to the parent algorithm
    """
    instrument = workspace.getInstrument()
    component_name = instrument.getName()
    component_name = sanitise_instrument_name(component_name)
    set_instrument_name = "SetInstrumentParameter"
    set_instrument_parameter_options = {"Workspace": calibration_workspace,
                                        "ComponentName": component_name}
    alg = create_child_algorithm(parent_alg, set_instrument_name, **set_instrument_parameter_options)

    # For now only string, int and double are handled
    type_options = {"string": "String", "int": "Number", "double": "Number"}
    value_options = {"string": instrument.getStringParameter,
                     "int": instrument.getIntParameter,
                     "double": instrument.getNumberParameter}
    try:
        for missing_parameter in missing_parameters:
            parameter_type = instrument.getParameterType(missing_parameter)
            type_to_save = type_options[parameter_type]
            value = value_options[parameter_type](missing_parameter)

            alg.setProperty("ParameterName", missing_parameter)
            alg.setProperty("ParameterType", type_to_save)
            alg.setProperty("Value", str(value[0]))
            alg.execute()
    except KeyError:
        raise RuntimeError("SANSCalibration: An Instrument Parameter File value of unknown type "
                           "was going to be copied. Cannot handle this currently.")
    def _mask(self, state_serialized, workspace, component):
        mask_name = "SANSMaskWorkspace"
        mask_options = {"SANSState": state_serialized,
                        "Workspace": workspace,
                        "Component": component}
        mask_alg = create_child_algorithm(self, mask_name, **mask_options)
        mask_alg.execute()
        return mask_alg.getProperty("Workspace").value

    def _scale(self, state_serialized, workspace):
        scale_name = "SANSScale"
        scale_options = {"SANSState": state_serialized,
                         "InputWorkspace": workspace,
                         "OutputWorkspace": EMPTY_NAME}
        scale_alg = create_child_algorithm(self, scale_name, **scale_options)
        scale_alg.execute()
        return scale_alg.getProperty("OutputWorkspace").value
    def _get_cropped_workspace(self, component):
        scatter_workspace = self.getProperty("SampleScatterWorkspace").value
        crop_name = "SANSCrop"
        crop_options = {"InputWorkspace": scatter_workspace,
                        "OutputWorkspace": EMPTY_NAME,
                        "Component": component}
        crop_alg = create_child_algorithm(self, crop_name, **crop_options)
        crop_alg.execute()
        return crop_alg.getProperty("OutputWorkspace").value

    def _convert_to_wavelength(self, state_serialized, workspace):
        wavelength_name = "SANSConvertToWavelength"
        wavelength_options = {"SANSState": state_serialized,
                              "InputWorkspace": workspace}
        wavelength_alg = create_child_algorithm(self, wavelength_name, **wavelength_options)
        wavelength_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
        wavelength_alg.setProperty("OutputWorkspace", workspace)
        wavelength_alg.execute()
        return wavelength_alg.getProperty("OutputWorkspace").value
Example #35
def apply_mask(state, workspace, component):
    state_serialized = state.property_manager
    mask_name = "SANSMaskWorkspace"
    mask_options = {"SANSState": state_serialized,
                    "Workspace": workspace,
                    "Component": component}
    mask_alg = create_child_algorithm('', mask_name, **mask_options)
    mask_alg.execute()
    return mask_alg.getProperty("Workspace").value
    def set_shift_and_scale_output(self, scale_factors, shift_factors):
        create_workspace_options = {"DataX": scale_factors,
                                    "DataY": shift_factors}
        create_workspace_alg = create_child_algorithm(self, "CreateWorkspace", **create_workspace_options)
        create_workspace_alg.execute()
        self.setProperty("OutShiftAndScaleFactor",
                         create_workspace_alg.getProperty("OutputWorkspace").value)
Example #37
def loader_for_raw(file_information, is_transmission, period, parent_alg):
    """
    Get the load algorithm information for a raw file.

    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the workspace is a transmission workspace.
    :param period: the period to load.
    :param parent_alg: a handle to the parent algorithm
    :return: the name of the load algorithm and the selected load options.
    """
    loader_name = "LoadRaw"
    loader_options = {
        "Filename": file_information.get_file_name(),
        "OutputWorkspace": EMPTY_NAME
    }
    if is_transmission:
        loader_options.update({"LoadMonitors": "Include"})
    else:
        loader_options.update({"LoadMonitors": "Separate"})

    if period != StateData.ALL_PERIODS:
        loader_options.update({"PeriodList": period})
    loader_alg = create_child_algorithm(parent_alg, loader_name,
                                        **loader_options)
    workspaces, monitor_workspaces = run_loader(loader_alg, file_information,
                                                is_transmission, period,
                                                parent_alg)

    # Add the sample details to the loaded workspace
    sample_name = "LoadSampleDetailsFromRaw"
    sample_options = {"Filename": file_information.get_file_name()}
    sample_alg = create_child_algorithm(parent_alg, sample_name,
                                        **sample_options)

    for workspace in workspaces:
        sample_alg.setProperty("InputWorkspace", workspace)
        sample_alg.execute()

    for monitor_workspace in monitor_workspaces:
        sample_alg.setProperty("InputWorkspace", monitor_workspace)
        sample_alg.execute()

    return workspaces, monitor_workspaces
    def _copy_bin_masks(self, workspaces, dummy_workspaces):
        for wav_range in workspaces.keys():
            mask_options = {"InputWorkspace": workspaces[wav_range],
                            "MaskedWorkspace": dummy_workspaces[wav_range],
                            "OutputWorkspace": EMPTY_NAME}
            mask_alg = create_child_algorithm(self, "MaskBinsFromWorkspace", **mask_options)
            mask_alg.execute()
            workspaces[wav_range] = mask_alg.getProperty("OutputWorkspace").value
    def _run_center_of_mass_position(self, scatter_workspace, centre1, centre2, r_min, tolerance):
        algorithm_name = "FindCenterOfMassPosition"
        alg_options = {"InputWorkspace": scatter_workspace,
                       "CenterX": centre1,
                       "CenterY": centre2,
                       "BeamRadius": r_min,
                       "Tolerance": tolerance,
                       "DirectBeam": False}
        alg = create_child_algorithm(self, algorithm_name, **alg_options)
        alg.execute()

        return alg.getProperty("CenterOfMass").value
Example #41
def strip_end_nans(workspace, parent_alg=None):
    """
    This function removes the INFs and NANs from the start and end of a 1D workspace.

    :param workspace: The workspace which is about to be trimmed.
    :param parent_alg: a handle to the parent algorithm
    :return: A NaN- and INF-trimmed workspace
    """
    # If the workspace is larger than 1D, then there is nothing we can do
    if workspace is None or workspace.getNumberHistograms() != 1:
        return workspace
    data = workspace.readY(0)
    # Find the index at which the first legal value appears

    start_index = next(
        (index for index in range(len(data)) if is_valid_data(data[index])),
        None)
    end_index = next((index for index in range(len(data) - 1, -1, -1)
                      if is_valid_data(data[index])), None)

    # If an index was not found then we return the current workspace. This means that all entries are either INFs
    # or NANs.

    if start_index is None or end_index is None:
        return workspace

    # Get the corresponding Q values
    q_values = workspace.readX(0)

    start_q = q_values[start_index]

    # Make sure we're inside the bin that we want to crop. This is part of the old framework. It looks like a bug fix,
    # hence we leave it in here for now. In general this is risky, and it should be a fraction of a bin width by which
    # we increase the end value
    is_point_data = len(workspace.dataX(0)) == len(workspace.dataY(0))
    if is_point_data:
        end_q = 1.001 * q_values[end_index]
    else:
        end_q = 1.001 * q_values[end_index + 1]

    # Crop the workspace in place
    crop_name = "CropWorkspace"
    crop_options = {
        "InputWorkspace": workspace,
        "XMin": start_q,
        "XMax": end_q
    }
    crop_alg = create_child_algorithm(parent_alg, crop_name, **crop_options)
    crop_alg.setProperty("OutputWorkspace", EMPTY_NAME)
    crop_alg.execute()
    ws = crop_alg.getProperty("OutputWorkspace").value
    return ws
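
strip_end_nans relies on an is_valid_data helper that is not included in this listing; a minimal sketch of it, assuming the usual NaN/infinity check:

import numpy as np

# Assumed helper: a data point is valid when it is neither NaN nor infinite.
def is_valid_data(value):
    return not (np.isnan(value) or np.isinf(value))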
Example #42
def calibrate(calibration_workspace, workspace_to_calibrate, parent_alg):
    """
    Performs a calibration. The instrument parameters are copied from the calibration workspace to the data workspace.

    :param calibration_workspace: the calibration workspace
    :param workspace_to_calibrate: the workspace which has the calibration applied to it.
    :param parent_alg: a handle to the parent algorithm
    """
    copy_instrument_name = "CopyInstrumentParameters"
    copy_instrument_options = {"InputWorkspace": calibration_workspace,
                               "OutputWorkspace": workspace_to_calibrate}
    alg = create_child_algorithm(parent_alg, copy_instrument_name, **copy_instrument_options)
    alg.execute()
    def _convert_to_histogram(self, workspace):
        if isinstance(workspace, IEventWorkspace):
            convert_name = "RebinToWorkspace"
            convert_options = {"WorkspaceToRebin": workspace,
                               "WorkspaceToMatch": workspace,
                               "OutputWorkspace": "OutputWorkspace",
                               "PreserveEvents": False}
            convert_alg = create_child_algorithm(self, convert_name, **convert_options)
            convert_alg.execute()
            workspace = convert_alg.getProperty("OutputWorkspace").value
            append_to_sans_file_tag(workspace, "_histogram")

        return workspace
Example #45
def calibrate(calibration_workspace, workspace_to_calibrate, parent_alg):
    """
    Performs a calibration. The instrument parameters are copied from the calibration workspace to the data workspace.

    :param calibration_workspace: the calibration workspace
    :param workspace_to_calibrate: the workspace which has the calibration applied to it.
    :param parent_alg: a handle to the parent algorithm
    """
    copy_instrument_name = "CopyInstrumentParameters"
    copy_instrument_options = {"InputWorkspace": calibration_workspace,
                               "OutputWorkspace": workspace_to_calibrate}
    alg = create_child_algorithm(parent_alg, copy_instrument_name, **copy_instrument_options)
    alg.execute()
Example #46
def get_cloned_calibration_workspace(calibration_workspace, parent_alg):
    """
    Creates a clone from a calibration workspace, in order to consume it later.

    :param calibration_workspace: the calibration workspace which is being cloned
    :param parent_alg: a handle to the parent algorithm
    :return: a cloned calibration workspace
    """
    clone_name = "CloneWorkspace"
    clone_options = {"InputWorkspace": calibration_workspace,
                     "OutputWorkspace": EMPTY_NAME}
    alg = create_child_algorithm(parent_alg, clone_name, **clone_options)
    alg.execute()
    return alg.getProperty("OutputWorkspace").value
Example #47
def loader_for_raw(file_information, is_transmission, period, parent_alg):
    """
    Get the load algorithm information for a raw file.

    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the workspace is a transmission workspace.
    :param period: the period to load.
    :param parent_alg: a handle to the parent algorithm
    :return: the name of the load algorithm and the selected load options.
    """
    loader_name = "LoadRaw"
    loader_options = {"Filename": file_information.get_file_name(),
                      "OutputWorkspace": EMPTY_NAME}
    if is_transmission:
        loader_options.update({"LoadMonitors": "Include"})
    else:
        loader_options.update({"LoadMonitors": "Separate"})

    if period != StateData.ALL_PERIODS:
        loader_options.update({"PeriodList": period})
    loader_alg = create_child_algorithm(parent_alg, loader_name, **loader_options)
    workspaces, monitor_workspaces = run_loader(loader_alg, file_information, is_transmission, period, parent_alg)

    # Add the sample details to the loaded workspace
    sample_name = "LoadSampleDetailsFromRaw"
    sample_options = {"Filename": file_information.get_file_name()}
    sample_alg = create_child_algorithm(parent_alg, sample_name, **sample_options)

    for workspace in workspaces:
        sample_alg.setProperty("InputWorkspace", workspace)
        sample_alg.execute()

    for monitor_workspace in monitor_workspaces:
        sample_alg.setProperty("InputWorkspace", monitor_workspace)
        sample_alg.execute()

    return workspaces, monitor_workspaces
    def _slice(self, state_serialized, workspace, monitor_workspace, data_type_as_string):
        slice_name = "SANSSliceEvent"
        slice_options = {"SANSState": state_serialized,
                         "InputWorkspace": workspace,
                         "InputWorkspaceMonitor": monitor_workspace,
                         "OutputWorkspace": EMPTY_NAME,
                         "OutputWorkspaceMonitor": "dummy2",
                         "DataType": data_type_as_string}
        slice_alg = create_child_algorithm(self, slice_name, **slice_options)
        slice_alg.execute()

        workspace = slice_alg.getProperty("OutputWorkspace").value
        monitor_workspace = slice_alg.getProperty("OutputWorkspaceMonitor").value
        slice_event_factor = slice_alg.getProperty("SliceEventFactor").value
        return workspace, monitor_workspace, slice_event_factor
def strip_end_nans(workspace, parent_alg=None):
    """
    This function removes the INFs and NANs from the start and end of a 1D workspace.

    :param workspace: The workspace which is about to be trimmed.
    :param parent_alg: a handle to the parent algorithm
    :return: A NaN- and INF-trimmed workspace
    """
    # If the workspace is larger than 1D, then there is nothing we can do
    if workspace is None or workspace.getNumberHistograms() != 1:
        return workspace
    data = workspace.readY(0)
    # Find the index at which the first legal value appears

    start_index = next((index for index in range(len(data)) if is_valid_data(data[index])), None)
    end_index = next((index for index in range(len(data)-1, -1, -1) if is_valid_data(data[index])), None)

    # If an index was not found then we return the current workspace. This means that all entries are either INFs
    # or NANs.

    if start_index is None or end_index is None:
        return workspace

    # Get the corresponding Q values
    q_values = workspace.readX(0)

    start_q = q_values[start_index]

    # Make sure we're inside the bin that we want to crop. This is part of the old framework. It looks like a bug fix,
    # hence we leave it in here for now. In general this is risky, and it should be a fraction of a bin width by which
    # we increase the end value
    is_point_data = len(workspace.dataX(0)) == len(workspace.dataY(0))
    if is_point_data:
        end_q = 1.001 * q_values[end_index]
    else:
        end_q = 1.001 * q_values[end_index + 1]

    # Crop the workspace in place
    crop_name = "CropWorkspace"
    crop_options = {"InputWorkspace": workspace,
                    "XMin": start_q,
                    "XMax": end_q}
    crop_alg = create_child_algorithm(parent_alg, crop_name, **crop_options)
    crop_alg.setProperty("OutputWorkspace", EMPTY_NAME)
    crop_alg.execute()
    ws = crop_alg.getProperty("OutputWorkspace").value
    return ws
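is_valid_data is not shown in this listing; the following is a minimal, self-contained sketch of the same trimming idea on a bare numpy array, assuming "valid" simply means finite (an assumption not confirmed by the source):

import numpy as np


def trim_invalid_ends(y):
    """Sketch: return a slice of y with leading and trailing NaNs/INFs removed,
    mirroring the start/end index search in strip_end_nans above."""
    valid = np.isfinite(y)
    if not valid.any():  # every entry is NaN or INF, nothing sensible to crop
        return y
    start = np.argmax(valid)                   # first finite entry
    end = len(y) - np.argmax(valid[::-1]) - 1  # last finite entry
    return y[start:end + 1]


print(trim_invalid_ends(np.array([np.nan, np.inf, 1.0, 2.0, np.nan])))  # [1. 2.]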
Example #50
    def _move(self, state_serialized, workspace, component, is_transmission=False):
        # First we set the workspace to zero, since it might have been moved around by the user in the ADS
        # Second we use the initial move to bring the workspace into the correct position
        move_name = "SANSMove"
        move_options = {"SANSState": state_serialized,
                        "Workspace": workspace,
                        "MoveType": "SetToZero",
                        "Component": ""}
        move_alg = create_child_algorithm(self, move_name, **move_options)
        move_alg.execute()
        workspace = move_alg.getProperty("Workspace").value

        # Do the initial move
        move_alg.setProperty("MoveType", "InitialMove")
        move_alg.setProperty("Component", component)
        move_alg.setProperty("Workspace", workspace)
        move_alg.setProperty("IsTransmissionWorkspace", is_transmission)
        move_alg.execute()
        return move_alg.getProperty("Workspace").value
Example #51
def loader_for_added_isis_nexus(file_information, is_transmission, period, parent_alg):
    """
    Get the name and options for the load algorithm for added ISIS nexus data.

    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the current file corresponds to transmission data
    :param period: the period to load
    :param parent_alg: a handle to the parent algorithm
    :return: the loaded workspaces and the associated monitor workspaces
    """
    _ = is_transmission  # noqa
    loader_name = "LoadNexusProcessed"
    loader_options = {"Filename": file_information.get_file_name(),
                      "OutputWorkspace": EMPTY_NAME,
                      "LoadHistory": True,
                      "FastMultiPeriod": True}
    if period != StateData.ALL_PERIODS:
        loader_options.update({"EntryNumber": period})
    loader_alg = create_child_algorithm(parent_alg, loader_name, **loader_options)
    return run_added_loader(loader_alg, file_information, is_transmission, period, parent_alg)
Example #52
def loader_for_isis_nexus(file_information, is_transmission, period, parent_alg):
    """
    Get name and the options for the loading algorithm.

    This takes a SANSFileInformation object and provides the inputs for the appropriate loading strategy.
    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the workspace is a transmission workspace.
    :param period: the period to load.
    :param parent_alg: a handle to the parent algorithm
    :return: the loaded workspaces and the associated monitor workspaces.
    """
    loader_options = {"Filename": file_information.get_file_name(),
                      "OutputWorkspace": EMPTY_NAME}
    if file_information.is_event_mode() and not is_transmission:
        loader_name = "LoadEventNexus"
        # Note that currently we don't have a way to only load one monitor
        loader_options.update({"LoadMonitors": True})
    elif not file_information.is_event_mode() and not is_transmission:
        loader_name = "LoadISISNexus"
        loader_options.update({"LoadMonitors": "Separate",
                               "EntryNumber": 0})
        if period != StateData.ALL_PERIODS:
            loader_options.update({"EntryNumber": period})
    elif file_information.is_event_mode() and is_transmission:
        # We have the rare case of an event file which is used for transmission calculations. In this case
        # we only extract the monitors
        loader_name = "LoadNexusMonitors"
    else:
        # We must be dealing with a transmission file, we need to load the whole file.
        # The file itself will most of the time only contain monitors anyway, but sometimes the detector
        # is used as a sort of monitor, hence we cannot sort out the monitors.
        loader_name = "LoadISISNexus"
        loader_options.update({"LoadMonitors": "Include",
                               "EntryNumber": 0})
        if period != StateData.ALL_PERIODS:
            loader_options.update({"EntryNumber": period})
    loader_alg = create_child_algorithm(parent_alg, loader_name, **loader_options)
    return run_loader(loader_alg, file_information, is_transmission, period, parent_alg)
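For illustration only, the four branches above reduce to a lookup from (file is in event mode, workspace is a transmission workspace) to a loader name; this table restates the logic above and is not used by the snippet:

# Illustrative restatement of the branch logic in loader_for_isis_nexus.
LOADER_FOR_ISIS_NEXUS = {
    (True, False): "LoadEventNexus",      # event scatter data; monitors loaded alongside
    (False, False): "LoadISISNexus",      # histogram scatter data; monitors split off ("Separate")
    (True, True): "LoadNexusMonitors",    # event file used for transmission; only the monitors are needed
    (False, True): "LoadISISNexus",       # histogram transmission data; monitors kept ("Include")
}

assert LOADER_FOR_ISIS_NEXUS[(True, True)] == "LoadNexusMonitors"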
Example #53
    def _perform_initial_move(self, workspaces, state):
        move_name = "SANSMove"
        state_dict = state.property_manager
        move_options = {"SANSState": state_dict,
                        "MoveType": "InitialMove"}

        # If beam centre was specified then use it
        beam_coordinates = self.getProperty("BeamCoordinates").value
        if beam_coordinates:
            move_options.update({"BeamCoordinates": beam_coordinates})

        # If component was specified then use it
        component = self.getProperty("Component").value
        if component:
            move_options.update({"Component": component})

        move_alg = create_child_algorithm(self, move_name, **move_options)

        # The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
        for key, workspace_list in list(workspaces.items()):
            for workspace in workspace_list:
                move_alg.setProperty("Workspace", workspace)
                move_alg.execute()
Example #54
def perform_can_subtraction(sample, can, parent_alg):
    """
    Subtracts the can from the sample workspace.

    We need to manually take care of the q resolution issue here.
    :param sample: the sample workspace
    :param can: the can workspace.
    :param parent_alg: a handle to the parent algorithm.
    :return: the subtracted workspace.
    """
    subtraction_name = "Minus"
    subtraction_options = {"LHSWorkspace": sample,
                           "RHSWorkspace": can,
                           "OutputWorkspace": EMPTY_NAME}
    subtraction_alg = create_child_algorithm(parent_alg, subtraction_name, **subtraction_options)
    subtraction_alg.execute()
    output_workspace = subtraction_alg.getProperty("OutputWorkspace").value

    # If the workspace is 1D and contains Q resolution (i.e. DX values), then we need to make sure that the
    # resulting output workspace contains the correct values
    correct_q_resolution_for_can(sample, can, output_workspace)

    return output_workspace
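correct_q_resolution_for_can is not included in this listing; the sketch below is a hedged, numpy-only illustration of the underlying idea, assuming the sample's Q resolution (DX values) is simply carried over to the subtracted result and that the counting errors combine in quadrature:

import numpy as np


def subtract_can_sketch(sample_y, sample_e, sample_dx, can_y, can_e):
    """Illustration only, not the Mantid implementation: intensities subtract,
    errors add in quadrature, and the sample's Q resolution is reused for the
    subtracted result."""
    out_y = sample_y - can_y
    out_e = np.sqrt(sample_e ** 2 + can_e ** 2)
    out_dx = sample_dx.copy()  # assumption: DX is taken from the sample workspace
    return out_y, out_e, out_dx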
Example #55
def extract_multi_period_event_workspace(loader, index, output_workspace_property_name, parent_alg):
    """
    Extract a single workspace from a WorkspaceGroup.

    Note that we need to perform a CloneWorkspace operation because this is the only way to get an individual workspace
    from a WorkspaceGroup. They are extremely "sticky" and using the indexed access will only provide a weak pointer
    which means that we will have a dead reference once the WorkspaceGroup goes out of scope.
    :param loader: an executed LoadEventNexus algorithm
    :param index: an index into the WorkspaceGroup; note that it is offset by 1
    :param output_workspace_property_name: the name of the output workspace property, i.e. OutputWorkspace or
                                           MonitorWorkspace
    :param parent_alg: a handle to the parent algorithm
    :return: a single workspace
    """
    group_workspace = loader.getProperty(output_workspace_property_name).value
    group_workspace_index = index - 1
    workspace_of_interest = group_workspace[group_workspace_index]

    clone_name = "CloneWorkspace"
    clone_options = {"InputWorkspace": workspace_of_interest,
                     "OutputWorkspace": EMPTY_NAME}
    clone_alg = create_child_algorithm(parent_alg, clone_name, **clone_options)
    clone_alg.execute()
    return clone_alg.getProperty("OutputWorkspace").value
Example #56
    def PyExec(self):
        # Get the input
        state = self._get_state()
        state_serialized = state.property_manager
        component_as_string = self.getProperty("Component").value
        progress = self._get_progress()

        # --------------------------------------------------------------------------------------------------------------
        # 1. Crop workspace by detector name
        #    This will create a reduced copy of the original workspace with only those spectra which are relevant
        #    for this particular reduction.
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Cropping ...")
        workspace = self._get_cropped_workspace(component_as_string)

        # --------------------------------------------------------------------------------------------
        # 2. Perform dark run subtraction
        #    This will subtract a dark background from the scatter workspace. Note that dark background subtraction
        #    will also affect the transmission calculation later on.
        # --------------------------------------------------------------------------------------------------------------

        # --------------------------------------------------------------------------------------------------------------
        # 3. Create event slice
        #    If we are dealing with an event workspace as input, this will cut out a time-based (user-defined) slice.
        #    In case of a histogram workspace, nothing happens.
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Event slicing ...")
        data_type_as_string = self.getProperty("DataType").value
        monitor_workspace = self._get_monitor_workspace()
        workspace, monitor_workspace, slice_event_factor = self._slice(state_serialized, workspace, monitor_workspace,
                                                                       data_type_as_string)

        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # COMPATIBILITY BEGIN
        # IMPORTANT: This section of the code should only be temporary. It allows us to convert to histogram
        # early on and hence compare the new reduction results with the output of the new reduction chain.
        # Once the new reduction chain is established, we should remove the compatibility feature.
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        compatibility = state.compatibility
        is_event_workspace = isinstance(workspace, IEventWorkspace)
        if compatibility.use_compatibility_mode and is_event_workspace:
            # We convert the workspace here to a histogram workspace, since we cannot otherwise
            # compare the results between the old and the new reduction workspace in a meaningful manner.
            # The old one is histogram and the new one is event.
            # Rebin to monitor workspace
            if compatibility.time_rebin_string:
                rebin_name = "Rebin"
                rebin_option = {"InputWorkspace": workspace,
                                "Params": compatibility.time_rebin_string,
                                "OutputWorkspace": EMPTY_NAME,
                                "PreserveEvents": False}
                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                rebin_alg.execute()
                workspace = rebin_alg.getProperty("OutputWorkspace").value
            else:
                rebin_name = "RebinToWorkspace"
                rebin_option = {"WorkspaceToRebin": workspace,
                                "WorkspaceToMatch": monitor_workspace,
                                "OutputWorkspace": EMPTY_NAME,
                                "PreserveEvents": False}
                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                rebin_alg.execute()
                workspace = rebin_alg.getProperty("OutputWorkspace").value
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # COMPATIBILITY END
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        # ------------------------------------------------------------
        # 4. Move the workspace into the correct position
        #    The detectors in the workspaces are set such that the beam centre is at (0,0). The position is
        #    a user-specified value which can be obtained with the help of the beam centre finder.
        # ------------------------------------------------------------
        progress.report("Moving ...")
        workspace = self._move(state_serialized, workspace, component_as_string)
        monitor_workspace = self._move(state_serialized, monitor_workspace, component_as_string)

        # --------------------------------------------------------------------------------------------------------------
        # 5. Apply masking (pixel masking and time masking)
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Masking ...")
        workspace = self._mask(state_serialized, workspace, component_as_string)

        # --------------------------------------------------------------------------------------------------------------
        # 6. Convert to Wavelength
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Converting to wavelength ...")
        workspace = self._convert_to_wavelength(state_serialized, workspace)

        # --------------------------------------------------------------------------------------------------------------
        # 7. Multiply by volume and absolute scale
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Multiplying by volume and absolute scale ...")
        workspace = self._scale(state_serialized, workspace)

        # --------------------------------------------------------------------------------------------------------------
        # 8. Create adjustment workspaces, those are
        #     1. pixel-based adjustments
        #     2. wavelength-based adjustments
        #     3. pixel-and-wavelength-based adjustments
        # Note that steps 4 to 7 could run in parallel if we don't use wide angle correction. If we do then the
        # creation of the adjustment workspaces requires the sample workspace itself and we have to run it sequentially.
        # We could consider to have a serial and a parallel strategy here, depending on the wide angle correction
        # settings. On the other hand it is not clear that this would be an advantage with the GIL.
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Creating adjustment workspaces ...")
        wavelength_adjustment_workspace, pixel_adjustment_workspace, wavelength_and_pixel_adjustment_workspace, \
            calculated_transmission_workspace, unfitted_transmission_workspace = \
            self._adjustment(state_serialized, workspace, monitor_workspace, component_as_string, data_type_as_string)

        # ------------------------------------------------------------
        # 9. Convert event workspaces to histogram workspaces
        # ------------------------------------------------------------
        progress.report("Converting to histogram mode ...")
        workspace = self._convert_to_histogram(workspace)

        # ------------------------------------------------------------
        # 10. Convert to Q
        # -----------------------------------------------------------
        progress.report("Converting to q ...")
        workspace, sum_of_counts, sum_of_norms = self._convert_to_q(state_serialized,
                                                                    workspace,
                                                                    wavelength_adjustment_workspace,
                                                                    pixel_adjustment_workspace,
                                                                    wavelength_and_pixel_adjustment_workspace)
        progress.report("Completed SANSReductionCore ...")

        # ------------------------------------------------------------
        # Populate the output
        # ------------------------------------------------------------
        self.setProperty("OutputWorkspace", workspace)

        # ------------------------------------------------------------
        # Diagnostic output
        # ------------------------------------------------------------
        if sum_of_counts:
            self.setProperty("SumOfCounts", sum_of_counts)
        if sum_of_norms:
            self.setProperty("SumOfNormFactors", sum_of_norms)

        if state.adjustment.show_transmission:
            self.setProperty("CalculatedTransmissionWorkspace", calculated_transmission_workspace)
            self.setProperty("UnfittedTransmissionWorkspace", unfitted_transmission_workspace)
Example #57
    def PyExec(self):
        # Get state
        state = self._get_state()

        # Get reduction mode
        overall_reduction_mode = self._get_reduction_mode(state)

        # Decide which core reduction information to run, i.e. HAB, LAB, ALL, MERGED. In the case of ALL and MERGED,
        # the required simple reduction modes need to be run. Normally this is HAB and LAB, future implementations
        # might have more detectors though (or different types)
        reduction_setting_bundles = self._get_reduction_setting_bundles(state, overall_reduction_mode)

        # Run core reductions
        use_optimizations = self.getProperty("UseOptimizations").value
        save_can = self.getProperty("SaveCan").value

        # Create the reduction core algorithm
        reduction_name = "SANSReductionCore"
        reduction_options = {}
        reduction_alg = create_child_algorithm(self, reduction_name, **reduction_options)

        # Set up progress
        progress = self._get_progress(len(reduction_setting_bundles), overall_reduction_mode)

        # --------------------------------------------------------------------------------------------------------------
        # Reduction
        # --------------------------------------------------------------------------------------------------------------
        output_bundles = []
        output_parts_bundles = []
        output_transmission_bundles = []
        for reduction_setting_bundle in reduction_setting_bundles:
            progress.report("Running a single reduction ...")
            # We want to make use of optimizations here. If a can workspace has already been reduced with the same can
            # settings and is stored in the ADS, then we should use it (provided the user has optimizations enabled).
            if use_optimizations and reduction_setting_bundle.data_type is DataType.Can:
                output_bundle, output_parts_bundle, output_transmission_bundle = run_optimized_for_can(reduction_alg,
                                                                                                       reduction_setting_bundle)
            else:
                output_bundle, output_parts_bundle, output_transmission_bundle = run_core_reduction(reduction_alg,
                                                                                                    reduction_setting_bundle)
            output_bundles.append(output_bundle)
            output_parts_bundles.append(output_parts_bundle)
            output_transmission_bundles.append(output_transmission_bundle)

        reduction_mode_vs_output_workspaces = {}

        # --------------------------------------------------------------------------------------------------------------
        # Deal with non-merged
        # Note that we have non-merged workspaces even in the case of a merged reduction, i.e. LAB and HAB results
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Final clean up...")
        output_workspaces_non_merged = get_final_output_workspaces(output_bundles, self)
        reduction_mode_vs_output_workspaces.update(output_workspaces_non_merged)

        # --------------------------------------------------------------------------------------------------------------
        # Deal with merging
        # --------------------------------------------------------------------------------------------------------------
        # Merge if required with stitching etc.
        if overall_reduction_mode is ReductionMode.Merged:
            progress.report("Merging reductions ...")
            merge_bundle = get_merge_bundle_for_merge_request(output_parts_bundles, self)
            self.set_shift_and_scale_output(merge_bundle)
            reduction_mode_vs_output_workspaces.update({ReductionMode.Merged: merge_bundle.merged_workspace})
            scaled_HAB = strip_end_nans(merge_bundle.scaled_hab_workspace, self)
            reduction_mode_vs_output_workspaces.update({ISISReductionMode.HAB: scaled_HAB})

        # --------------------------------------------------------------------------------------------------------------
        # Set the output workspaces
        # --------------------------------------------------------------------------------------------------------------
        # Set sample logs
        # Todo: Set sample log -> Userfile and unfitted transmission workspace. Should probably set on
        # higher level (SANSBatch)
        # Set the output workspaces
        self.set_output_workspaces(reduction_mode_vs_output_workspaces)

        # --------------------------------------------------------------------------------------------------------------
        # Set the reduced can workspaces on the output if optimizations are
        # enabled. This will allow SANSBatchReduction to add them to the ADS.
        # --------------------------------------------------------------------------------------------------------------
        if use_optimizations:
            self.set_reduced_can_workspace_on_output(output_bundles, output_parts_bundles)

        if save_can:
            self.set_can_and_sam_on_output(output_bundles)

        self.set_transmission_workspaces_on_output(output_transmission_bundles,
                                                   state.adjustment.calculate_transmission.fit)
Example #58
def run_added_loader(loader, file_information, is_transmission, period, parent_alg):
    """
    Runs the loader for added workspaces.

    This is a complicated matter. The added workspaces can be histogram- or event-based and can consist of
    multi-period data.
    1. Histogram Data: Since we use LoadNexusProcessed we cannot load the monitors separately. We have to make use of
       the algorithm ExtractMonitors in order to split the detector from the monitor
       (if we are dealing with non-transmission data)
    2. Event Data: The added event data and the corresponding monitor data set are stored as two separate units in the
       file. There are several cases to consider
       i. We only have one period, this means that the first entry is the added event data and the second
       entry is the added monitor data
       ii. We have N periods. The first N entries are the added event data and the second N entries are the
       corresponding monitors.
       iii. We have N periods but only want to load period K. Again only two entries are loaded: we request the
       K-th entry for the added event data and entry K + NumPeriods for the corresponding monitor data.

    :param loader: a handle to a preset load algorithm
    :param file_information: the FileInformation object
    :param is_transmission: if the data set is a transmission data set
    :param period: the selected period
    :param parent_alg: a handle to the parent algorithm
    :return: workspaces and monitors
    """
    def extract_histogram_data(load_alg, num_periods, selected_period):
        ws_collection = []
        if num_periods == 1:
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)
        elif num_periods > 1 and selected_period is not StateData.ALL_PERIODS:
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)
        else:
            for index in range(1, num_periods + 1):
                ws_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(index)).value)
        return ws_collection

    def extract_event_data(load_alg, num_periods, selected_period):
        ws_collection = []
        ws_monitor_collection = []
        if num_periods == 1 or (num_periods > 1 and selected_period is not StateData.ALL_PERIODS):
            # First get the added event data
            period_to_load = selected_period if selected_period is not StateData.ALL_PERIODS else 1
            offset = num_periods
            load_alg.setProperty("EntryNumber", period_to_load)
            load_alg.execute()
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)

            # Second get the added monitor data
            load_alg.setProperty("EntryNumber", period_to_load + offset)
            load_alg.execute()
            ws_monitor_collection.append(load_alg.getProperty("OutputWorkspace").value)
        else:
            load_alg.execute()
            workspace_indices = list(range(1, num_periods + 1))
            monitor_indices = list(range(num_periods + 1, num_periods*2 + 1))
            for workspace_index, monitor_index in zip(workspace_indices, monitor_indices):
                ws_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(workspace_index)).value)
                ws_monitor_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(monitor_index)).value)
        return ws_collection, ws_monitor_collection

    workspaces = []
    workspace_monitors = []
    number_of_periods = file_information.get_number_of_periods()

    # Dealing with added event data or histogram data is vastly different, hence we need to separate paths
    if file_information.is_event_mode():
        if is_transmission:
            raise RuntimeError("SANSLoad: Cannot load event-mode data for transmission calculation. Attempted to "
                               "load the file {0} which is event-based as transmission "
                               "data.".format(file_information.get_file_name()))
        workspaces, workspace_monitors = extract_event_data(loader, number_of_periods, period)
    else:
        # In the case of histogram data we need to consider the following.
        # The data is combined with the monitors since we load with LoadNexusProcessed. Hence we need to split the
        # workspace at this point with ExtractMonitors if we are not looking at a transmission run.
        loader.execute()
        workspace_collection = extract_histogram_data(loader, number_of_periods, period)
        if not is_transmission:
            extract_name = "ExtractMonitors"
            extract_options = {"DetectorWorkspace": "dummy1",
                               "MonitorWorkspace": "dummy2"}
            extract_alg = create_child_algorithm(parent_alg, extract_name, **extract_options)
            for workspace in workspace_collection:
                extract_alg.setProperty("InputWorkspace", workspace)
                extract_alg.execute()
                workspaces.append(extract_alg.getProperty("DetectorWorkspace").value)
                workspace_monitors.append(extract_alg.getProperty("MonitorWorkspace").value)
        else:
            for workspace in workspace_collection:
                workspaces.append(workspace)
    return workspaces, workspace_monitors
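The entry-number bookkeeping described in the docstring of run_added_loader (case iii: entry K holds period K's added event data, entry K + NumPeriods its monitors) is easy to get wrong; the sketch below restates it on plain integers, with a -1 sentinel standing in for StateData.ALL_PERIODS (an assumption of the sketch):

ALL_PERIODS = -1  # stand-in sentinel for StateData.ALL_PERIODS in this sketch


def added_event_entry_numbers(num_periods, selected_period=ALL_PERIODS):
    """Sketch: return (event_entry, monitor_entry) pairs for an added
    event-mode file, mirroring extract_event_data above."""
    if num_periods == 1 or selected_period != ALL_PERIODS:
        k = selected_period if selected_period != ALL_PERIODS else 1
        return [(k, k + num_periods)]
    return [(k, k + num_periods) for k in range(1, num_periods + 1)]


print(added_event_entry_numbers(3, selected_period=2))  # [(2, 5)]
print(added_event_entry_numbers(3))                     # [(1, 4), (2, 5), (3, 6)]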
Example #59
    def PyExec(self):
        # --------
        # Clone the input workspaces
        # --------
        # Get the input
        state = self._get_state()
        # --------
        # Change cloned state
        # --------
        # Remove phi Masking
        if state.mask.phi_min:
            state.mask.phi_min = 0.0
        if state.mask.phi_max:
            state.mask.phi_max = 0.0

        component = self.getProperty("Component").value
        component_as_string = DetectorType.to_string(component)

        # Set test centre
        state.move.detectors[component_as_string].sample_centre_pos1 = self.getProperty(
            "Centre1").value
        state.move.detectors[component_as_string].sample_centre_pos2 = self.getProperty(
            "Centre2").value

        state_serialized = state.property_manager

        progress = self._get_progress()

        # --------------------------------------------------------------------------------------------------------------
        # 1. Crop workspace by detector name
        #    This will create a reduced copy of the original workspace with only those spectra which are relevant
        #    for this particular reduction.
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Cropping ...")
        scatter_data = self._get_cropped_workspace(component_as_string)

        # --------------------------------------------------------------------------------------------
        # 2. Perform dark run subtraction
        #    This will subtract a dark background from the scatter workspace. Note that dark background subtraction
        #    will also affect the transmission calculation later on.
        # --------------------------------------------------------------------------------------------------------------

        # --------------------------------------------------------------------------------------------------------------
        # 3. Create event slice
        #    If we are dealing with an event workspace as input, this will cut out a time-based (user-defined) slice.
        #    In case of a histogram workspace, nothing happens.
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Event slicing ...")
        monitor_scatter_data = self._get_monitor_workspace()
        scatter_data, monitor_scatter_data, slice_event_factor = self._slice(state_serialized, scatter_data,
                                                                             monitor_scatter_data,
                                                                             'Sample')

        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # COMPATIBILITY BEGIN
        # IMPORTANT: This section of the code should only be temporary. It allows us to convert to histogram
        # early on and hence compare the new reduction results with the output of the new reduction chain.
        # Once the new reduction chain is established, we should remove the compatibility feature.
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        compatibility = state.compatibility
        is_event_workspace = isinstance(scatter_data, IEventWorkspace)
        if compatibility.use_compatibility_mode and is_event_workspace:
            # We convert the workspace here to a histogram workspace, since we cannot otherwise
            # compare the results between the old and the new reduction workspace in a meaningful manner.
            # The old one is histogram and the new one is event.
            # Rebin to monitor workspace
            if compatibility.time_rebin_string:
                rebin_name = "Rebin"
                rebin_option = {"InputWorkspace": scatter_data,
                                "Params": compatibility.time_rebin_string,
                                "OutputWorkspace": EMPTY_NAME,
                                "PreserveEvents": False}
                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                rebin_alg.execute()
                scatter_data = rebin_alg.getProperty("OutputWorkspace").value
            else:
                rebin_name = "RebinToWorkspace"
                rebin_option = {"WorkspaceToRebin": scatter_data,
                                "WorkspaceToMatch": monitor_scatter_date,
                                "OutputWorkspace": EMPTY_NAME,
                                "PreserveEvents": False}
                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
                rebin_alg.execute()
                scatter_data = rebin_alg.getProperty("OutputWorkspace").value
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # COMPATIBILITY END
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # ------------------------------------------------------------
        # 4. Move the workspace into the correct position
        #    The detectors in the workspaces are set such that the beam centre is at (0,0). The position is
        #    a user-specified value which can be obtained with the help of the beam centre finder.
        # ------------------------------------------------------------
        scatter_data = self._move(state_serialized, scatter_data, component_as_string)

        # --------------------------------------------------------------------------------------------------------------
        # 5. Apply masking (pixel masking and time masking)
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Masking ...")
        scatter_data = self._mask(state_serialized, scatter_data, component_as_string)

        # --------------------------------------------------------------------------------------------------------------
        # 6. Convert to Wavelength
        # --------------------------------------------------------------------------------------------------------------
        progress.report("Converting to wavelength ...")
        scatter_data = self._convert_to_wavelength(state_serialized, scatter_data)

        centre1 = self.getProperty("Centre1").value
        centre2 = self.getProperty("Centre2").value
        r_min = self.getProperty("RMin").value
        tolerance = self.getProperty("Tolerance").value
        output_table = self._run_center_of_mass_position(scatter_data, centre1, centre2, r_min, tolerance)

        centre1_out = output_table[0]
        centre2_out = output_table[1]

        self.setProperty("Centre1", centre1_out + centre1)
        self.setProperty("Centre2", centre2_out + centre2)