コード例 #1
0
ファイル: nexus_conversion.py プロジェクト: williamfgc/PyRS
    def __init__(self,
                 nexus_file_name,
                 mask_file_name=None,
                 extra_logs=None):
        """Initialization

        Parameters
        ----------
        nexus_file_name : str
            Name of NeXus file
        mask_file_name : str, optional
            Name of masking file; only Mantid XML masks are supported
        extra_logs : list, tuple, optional
            list of string with no default logs to keep in project file
            (None is treated as an empty list; avoids a mutable default)
        """
        # configure logging for this class
        self._log = Logger(__name__)

        # validate NeXus file exists
        checkdatatypes.check_file_name(nexus_file_name, True, False, False,
                                       'NeXus file')
        self._nexus_name = nexus_file_name

        # validate mask file exists
        if mask_file_name is None:
            self._mask_file_name = None
        else:
            checkdatatypes.check_file_name(mask_file_name, True, False, False,
                                           'Mask file')
            self._mask_file_name = mask_file_name
            if not mask_file_name.lower().endswith('.xml'):
                raise NotImplementedError(
                    'Only Mantid mask in XML format is supported now.  File '
                    '{} with type {} is not supported yet.'
                    ''.format(mask_file_name,
                              mask_file_name.split('.')[-1]))

        # workspaces: event workspace named after the NeXus file stem
        self._event_ws_name = os.path.basename(nexus_file_name).split('.')[0]

        # Bug fix: extra_logs previously defaulted to a shared mutable list;
        # build the per-call list here instead
        logs_to_keep = list(extra_logs) if extra_logs is not None else []
        logs_to_keep.extend(DEFAULT_KEEP_LOGS)

        self.__load_logs(logs_to_keep)

        # load the mask
        self.mask_array = None  # TODO to promote direct access
        if mask_file_name:
            self.__load_mask(mask_file_name)

        # create the hidra workspace
        self._hidra_workspace = workspaces.HidraWorkspace(self._nexus_name)

        # Set a default instrument with this workspace
        # initialize instrument with hard coded values
        instrument = AnglerCameraDetectorGeometry(NUM_PIXEL_1D, NUM_PIXEL_1D,
                                                  PIXEL_SIZE, PIXEL_SIZE,
                                                  ARM_LENGTH, False)
        self._hidra_workspace.set_instrument_geometry(instrument)

        # project file (created later, not during construction)
        self._project_file = None
コード例 #2
0
    def __init__(self,
                 ws,
                 parent=None,
                 window_flags=Qt.Window,
                 model=None,
                 view=None,
                 conf=None):
        """
        Create a presenter for controlling the slice display for a workspace
        :param ws: Workspace containing data to display and slice
        :param parent: An optional parent widget
        :param window_flags: An optional set of window flags
        :param model: A model to define slicing operations. If None uses SliceViewerModel
        :param view: A view to display the operations. If None uses SliceViewerView
        :param conf: Optional settings object; forwarded to the view and stored
        """
        # Fall back to default model/view implementations when not injected
        model: SliceViewerModel = model if model else SliceViewerModel(ws)
        self.view = view if view else SliceViewerView(
            self, Dimensions.get_dimensions_info(ws),
            model.can_normalize_workspace(), parent, window_flags, conf)
        # NOTE(review): self.model used below is presumably assigned from
        # `model` by this base-class __init__ — confirm in the superclass
        super().__init__(ws, self.view.data_view, model)

        self._logger = Logger("SliceViewer")
        # populated later if/when a peaks overlay is created
        self._peaks_presenter: PeaksViewerCollectionPresenter = None
        self.conf = conf

        # Acts as a 'time capsule' to the properties of the model at this
        # point in the execution. By the time the ADS observer calls self.replace_workspace,
        # the workspace associated with self.model has already been changed.
        self.initial_model_properties = model.get_properties()
        self._new_plot_method, self.update_plot_data = self._decide_plot_update_methods(
        )

        self.view.setWindowTitle(self.model.get_title())
        # only redraw on zoom when dynamic rebinning cannot be used
        self.view.data_view.create_axes_orthogonal(
            redraw_on_zoom=not WorkspaceInfo.can_support_dynamic_rebinning(
                self.model.ws))

        # wire up normalization option only when the workspace supports it
        if self.model.can_normalize_workspace():
            self.view.data_view.set_normalization(ws)
            self.view.data_view.norm_opts.currentTextChanged.connect(
                self.normalization_changed)
        if not self.model.can_support_peaks_overlays():
            self.view.data_view.disable_tool_button(ToolItemText.OVERLAY_PEAKS)
        # check whether to enable non-orthog view
        # don't know whether can always assume init with display indices (0,1) - so get sliceinfo
        sliceinfo = self.get_sliceinfo()
        if not sliceinfo.can_support_nonorthogonal_axes():
            self.view.data_view.disable_tool_button(
                ToolItemText.NONORTHOGONAL_AXES)

        self.view.data_view.help_button.clicked.connect(
            self.action_open_help_window)

        self.refresh_view()

        # Start the GUI with zoom selected.
        self.view.data_view.activate_tool(ToolItemText.ZOOM)

        # Keep the display in sync with workspace lifecycle events in the ADS
        self.ads_observer = SliceViewerADSObserver(self.replace_workspace,
                                                   self.rename_workspace,
                                                   self.ADS_cleared,
                                                   self.delete_workspace)
コード例 #3
0
    def reduce(self):
        """
            Go through the list of reduction steps: run the configured
            reduction algorithm on every registered data file/workspace,
            post-process, and append a summary to a log file.

            Returns the accumulated log text, or None when no reduction
            algorithm was configured.
        """
        t_0 = time.time()
        self.output_workspaces = []

        # Log text header
        self.log_text = "%s reduction - %s\n" % (self.instrument_name,
                                                 time.ctime())
        self.log_text += "Mantid Python API v2\n"

        # Go through the list of steps that are common to all data files
        self.pre_process()

        if self.reduction_algorithm is None:
            Logger("Reducer").error(
                "A reduction algorithm wasn't set: stopping")
            return

        _first_ws_name = None
        for ws in self._data_files.keys():
            if _first_ws_name is None:
                _first_ws_name = ws
            alg = AlgorithmManager.create(self.reduction_algorithm)
            alg.initialize()
            props = [p.name for p in alg.getProperties()]

            # Check whether the data is already available or needs to be loaded
            if self._data_files[ws] is not None:
                datafile = self._data_files[ws]
                # isinstance instead of type() comparison (handles subclasses)
                if isinstance(datafile, list):
                    datafile = ','.join(datafile)
                if "Filename" in props:
                    alg.setPropertyValue("Filename", datafile)
                else:
                    msg = "Can't set the Filename property on %s" % self.reduction_algorithm
                    Logger("Reducer").error(msg)
            else:
                if "InputWorkspace" in props:
                    alg.setPropertyValue("InputWorkspace", ws)
                else:
                    msg = "Can't set the InputWorkspace property on %s" % self.reduction_algorithm
                    Logger("Reducer").error(msg)

            if "ReductionProperties" in props:
                alg.setPropertyValue("ReductionProperties",
                                     self.get_reduction_table_name())

            if "OutputWorkspace" in props:
                alg.setPropertyValue("OutputWorkspace", ws)

            alg.execute()
            if "OutputMessage" in props:
                self.log_text += alg.getProperty("OutputMessage").value + '\n'

        # any clean up, possibly removing workspaces
        self.post_process()

        # Determine which directory to use; fall back to the home directory
        # when the configured output path does not exist
        output_dir = self._data_path
        if self._output_path is not None:
            if os.path.isdir(self._output_path):
                output_dir = self._output_path
            else:
                output_dir = os.path.expanduser('~')

        self.log_text += "Reduction completed in %g sec\n" % (time.time() -
                                                              t_0)
        if _first_ws_name is not None:
            log_path = os.path.join(output_dir,
                                    "%s_reduction.log" % _first_ws_name)
        else:
            log_path = os.path.join(output_dir,
                                    "%s_reduction.log" % self.instrument_name)
        self.log_text += "Log saved to %s" % log_path

        # Write the log to file; the context manager guarantees the handle
        # is closed even if a write fails (original leaked on error)
        with open(log_path, 'a') as f:
            f.write("\n-------------------------------------------\n")
            f.write(self.log_text)
        return self.log_text
コード例 #4
0
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
#     NScD Oak Ridge National Laboratory, European Spallation Source
#     & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)

import mantid.simpleapi as mantid
from mantid.kernel import Logger

muon_logger = Logger('Muon-Algs')


def run_MuonPreProcess(parameter_dict):
    """
    Apply the MuonPreProcess algorithm with the properties supplied through
    the input dictionary of {property_name: property_value} pairs.
    Returns the calculated workspace.
    """
    alg = mantid.AlgorithmManager.create("MuonPreProcess")
    alg.initialize()
    # Keep the result out of the ADS; the caller receives the workspace value
    alg.setAlwaysStoreInADS(False)
    alg.setProperty("OutputWorkspace", "__notUsed")
    alg.setProperties(parameter_dict)
    alg.execute()
    return alg.getProperty("OutputWorkspace").value


def run_MuonGroupingCounts(parameter_dict):
    """
コード例 #5
0
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
#     NScD Oak Ridge National Laboratory, European Spallation Source
#     & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +

import mantid.simpleapi as mantid_api
import os
from mantid.kernel import Logger
import BilbyCustomFunctions_Reduction

try:
    import mantidplot
except (Exception, Warning):
    mantidplot = None

ansto_logger = Logger("AnstoDataReduction")


class RunBilbyReduction:
    """Holder for the configuration of a single Bilby reduction run."""

    def __init__(self,
                 reduction_settings_file,
                 reduction_settings_index,
                 file_index,
                 tube_shift_correction_file,
                 save_files=True,
                 use_default_save_directory=False):
        # Record the reduction configuration on the instance.
        self.tube_shift_correction_file = tube_shift_correction_file
        self.file_index = file_index
        self.reduction_settings_index = reduction_settings_index
        self.reduction_settings_file = reduction_settings_file
        self.save_files = save_files
コード例 #6
0
def _check_all_or_no_q_values(q_min, q_max):
    if (q_min is None) != (q_max is None):
        error_msg = "Both q_min and q_max parameters should be provided, not just one"
        Logger("data_stitching").error(error_msg)
        raise RuntimeError(error_msg)
コード例 #7
0
def SaveIqAscii(reducer=None, process=''):
    """ Old command for backward compatibility """
    # Warn that this entry point is deprecated, then forward the process
    # information to the active reduction singleton as before.
    deprecation_note = ("SaveIqAscii is not longer used:\n  "
                        "Please use 'SaveIq' instead\n  ")
    Logger("CommandInterface").warning(deprecation_note)
    ReductionSingleton().reduction_properties["ProcessInfo"] = str(process)
コード例 #8
0
    def find_beam_centre(self, state):
        """
        This is called from the GUI and runs the find beam centre algorithm given a state model and a beam_centre_model object.

        :param state: A SANS state object
        :param beam_centre_model: An instance of the BeamCentreModel class.
        :returns: The centre position found.
        """
        centre_finder = self.SANSCentreFinder()
        # Translate the up/down + left/right flags into a search direction;
        # with neither flag set there is nothing to do, so return the
        # current positions unchanged.
        if self.up_down and self.left_right:
            find_direction = FindDirectionEnum.All
        elif self.up_down:
            find_direction = FindDirectionEnum.Up_Down
        elif self.left_right:
            find_direction = FindDirectionEnum.Left_Right
        else:
            logger = Logger("CentreFinder")
            logger.notice("Have chosen no find direction exiting early")
            return {"pos1": self.lab_pos_1, "pos2": self.lab_pos_2}

        if self.q_min:
            state.convert_to_q.q_min = self.q_min
        if self.q_max:
            state.convert_to_q.q_max = self.q_max

        # Arguments shared by every centre_finder invocation below
        shared_args = dict(r_min=self.r_min,
                           r_max=self.r_max,
                           max_iter=self.max_iterations,
                           tolerance=self.tolerance,
                           find_direction=find_direction,
                           component=self.component)

        if self.COM:
            # First pass seeds the search without the reduction method ...
            centre = centre_finder(state,
                                   x_start=self.lab_pos_1,
                                   y_start=self.lab_pos_2,
                                   reduction_method=False,
                                   **shared_args)
            # ... then refine from that seed with the reduction method
            centre = centre_finder(state,
                                   x_start=centre['pos1'],
                                   y_start=centre['pos2'],
                                   reduction_method=True,
                                   verbose=self.verbose,
                                   **shared_args)
        else:
            centre = centre_finder(state,
                                   x_start=self.lab_pos_1,
                                   y_start=self.lab_pos_2,
                                   reduction_method=True,
                                   verbose=self.verbose,
                                   **shared_args)
        return centre
コード例 #9
0
    def filterByLogValue(self):
        """ Filter by log value

        Reads the sample-log name and the optional time/value limits from the
        GUI widgets, generates an events filter with GenerateEventsFilter and
        applies it to the data workspace via splitWksp.
        """
        # Generate event filter
        kwargs = {}
        samplelog = str(self.ui.comboBox_2.currentText())
        if len(samplelog) == 0:
            error_msg = "No sample log is selected!"
            Logger("Filter_Events").error(error_msg)
            return

        # optional relative start/stop times (seconds), passed as strings
        if self.ui.lineEdit_3.text() != "":
            rel_starttime = float(self.ui.lineEdit_3.text())
            kwargs["StartTime"] = str(rel_starttime)

        if self.ui.lineEdit_4.text() != "":
            rel_stoptime = float(self.ui.lineEdit_4.text())
            kwargs["StopTime"] = str(rel_stoptime)

        # optional log-value window and sampling interval
        if self.ui.lineEdit_5.text() != "":
            minlogvalue = float(self.ui.lineEdit_5.text())
            kwargs["MinimumLogValue"] = minlogvalue

        if self.ui.lineEdit_6.text() != "":
            maxlogvalue = float(self.ui.lineEdit_6.text())
            kwargs["MaximumLogValue"] = maxlogvalue

        if self.ui.lineEdit_7.text() != "":
            logvalueintv = float(self.ui.lineEdit_7.text())
            kwargs["LogValueInterval"] = logvalueintv
        logvalchangedir = str(self.ui.comboBox_4.currentText())
        kwargs["FilterLogValueByChangingDirection"] = logvalchangedir

        # NOTE: logvalueintv is reused here for the time-tolerance field
        if self.ui.lineEdit_9.text() != "":
            logvalueintv = float(self.ui.lineEdit_9.text())
            kwargs["TimeTolerance"] = logvalueintv
        logboundtype = str(self.ui.comboBox_5.currentText())
        kwargs["LogBoundary"] = logboundtype

        if self.ui.lineEdit_8.text() != "":
            logvaluetol = float(self.ui.lineEdit_8.text())
            kwargs["LogValueTolerance"] = logvaluetol

        # output workspace names are derived from the data workspace name
        splitwsname = str(self._dataWS) + "_splitters"
        splitinfowsname = str(self._dataWS) + "_info"
        fastLog = self.ui.checkBox_fastLog.isChecked()

        title = str(self.ui.lineEdit_title.text())

        splitws, infows = api.GenerateEventsFilter(
            InputWorkspace      = self._dataWS,
            UnitOfTime          = "Seconds",
            TitleOfSplitters    = title,
            OutputWorkspace     = splitwsname,
            LogName             = samplelog,
            FastLog             = fastLog,
            InformationWorkspace = splitinfowsname, **kwargs)

        # apply the generated splitters; surface failures through the GUI
        try:
            self.splitWksp(splitws, infows)
        except RuntimeError as e:
            self._setErrorMsg("Splitting Failed!\n %s" % (str(e)))

        return
コード例 #10
0
 def __init__(self, parent_presenter):
     """Initialise the presenter: remember the parent presenter and create
     the view placeholder, work handler and logger collaborators."""
     super(MaskingTablePresenter, self).__init__()
     self._parent_presenter = parent_presenter
     self._view = None
     self._work_handler = WorkHandler()
     self._logger = Logger("SANS")
コード例 #11
0
class BeamCentreModel(object):
    """
    Model for the SANS beam centre finder.

    Holds the search configuration (radius limits, Q limits, search
    direction flags, scaling, detector component) and runs the centre
    finding algorithm, storing the found positions back on this model.
    """
    logger = Logger("CentreFinder")

    def __init__(self, SANSCentreFinder):
        """
        :param SANSCentreFinder: factory/callable returning the centre
            finder used by find_beam_centre.
        """
        super(BeamCentreModel, self).__init__()
        self._max_iterations = 10
        self._r_min = 0
        self._r_max = 0
        self._left_right = True
        self._up_down = True
        self._tolerance = 0.0001251
        self._lab_pos_1 = ''
        self._lab_pos_2 = ''
        self._hab_pos_2 = ''
        self._hab_pos_1 = ''
        self.scale_1 = 1000
        self.scale_2 = 1000
        self.COM = False
        self.verbose = False
        self.q_min = 0.01
        self.q_max = 0.1
        self._component = DetectorType.LAB
        self.update_lab = True
        self.update_hab = True

        self.reset_inst_defaults(instrument=SANSInstrument.NO_INSTRUMENT)

        self.SANSCentreFinder = SANSCentreFinder

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3; appears intentional (equality-only use).
        return self.__dict__ == other.__dict__

    def reset_inst_defaults(self, instrument):
        """Reset radius limits and scaling to the given instrument's defaults."""
        if instrument is SANSInstrument.LOQ:
            self._r_min = 96
            self._r_max = 216

            # TODO HAB on LOQ prefers 96-750
        else:
            # All other instruments hard-code this as follows
            self._r_min = 60
            self._r_max = 280

        self.set_scaling(instrument=instrument)

    def set_scaling(self, instrument):
        """Set position scaling; LARMOR uses a different scale on axis 1."""
        self.scale_1 = 1000
        self.scale_2 = 1000

        if instrument == SANSInstrument.LARMOR:
            self.scale_1 = 1.0

    def find_beam_centre(self, state):
        """
        This is called from the GUI and runs the find beam centre algorithm
        given a SANS state object.

        The found positions are stored on this model via
        _update_centre_positions; nothing is returned.

        :param state: A SANS state object
        """
        centre_finder = self.SANSCentreFinder()
        find_direction = self.get_finder_direction()
        if not find_direction:
            self.logger.error("Have chosen no find direction exiting early")
            return

        # starting position depends on which detector component is selected
        pos_1 = self._lab_pos_1 if self.component is DetectorType.LAB else self._hab_pos_1
        pos_2 = self._lab_pos_2 if self.component is DetectorType.LAB else self._hab_pos_2

        if self.COM:
            # first pass seeds the search, second pass refines it with the
            # reduction method from the seeded position
            centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                                   max_iter=self.max_iterations,
                                   x_start=pos_1, y_start=pos_2,
                                   tolerance=self.tolerance,
                                   find_direction=find_direction, reduction_method=False, component=self.component)

            centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                                   max_iter=self.max_iterations,
                                   x_start=centre['pos1'], y_start=centre['pos2'],
                                   tolerance=self.tolerance,
                                   find_direction=find_direction, reduction_method=True,
                                   verbose=self.verbose, component=self.component)
        else:
            centre = centre_finder(state, r_min=self.r_min, r_max=self.r_max,
                                   max_iter=self.max_iterations, x_start=pos_1,
                                   y_start=pos_2, tolerance=self.tolerance,
                                   find_direction=find_direction, reduction_method=True,
                                   verbose=self.verbose, component=self.component)

        self._update_centre_positions(results=centre)

    def _update_centre_positions(self, results):
        """Store the found positions on the currently selected component."""
        if self.component is DetectorType.LAB:
            self.lab_pos_1 = results["pos1"]
            self.lab_pos_2 = results["pos2"]
        elif self.component is DetectorType.HAB:
            self.hab_pos_1 = results['pos1']
            self.hab_pos_2 = results['pos2']
        else:
            # Bug fix: report the unexpected component, not the results dict
            raise RuntimeError("Unexpected detector type, got %r" % self.component)

    def get_finder_direction(self):
        """Map the up/down + left/right flags to a FindDirectionEnum, or None."""
        find_direction = None
        if self.up_down and self.left_right:
            find_direction = FindDirectionEnum.ALL
        elif self.up_down:
            find_direction = FindDirectionEnum.UP_DOWN
        elif self.left_right:
            find_direction = FindDirectionEnum.LEFT_RIGHT

        return find_direction

    # --- simple pass-through accessors for the private attributes below ---

    @property
    def max_iterations(self):
        return self._max_iterations

    @max_iterations.setter
    def max_iterations(self, value):
        self._max_iterations = value

    @property
    def r_min(self):
        return self._r_min

    @r_min.setter
    def r_min(self, value):
        self._r_min = value

    @property
    def r_max(self):
        return self._r_max

    @r_max.setter
    def r_max(self, value):
        self._r_max = value

    @property
    def q_min(self):
        return self._q_min

    @q_min.setter
    def q_min(self, value):
        self._q_min = value

    @property
    def q_max(self):
        return self._q_max

    @q_max.setter
    def q_max(self, value):
        self._q_max = value

    @property
    def left_right(self):
        return self._left_right

    @left_right.setter
    def left_right(self, value):
        self._left_right = value

    @property
    def up_down(self):
        return self._up_down

    @up_down.setter
    def up_down(self, value):
        self._up_down = value

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, value):
        self._verbose = value

    @property
    def COM(self):
        return self._COM

    @COM.setter
    def COM(self, value):
        self._COM = value

    @property
    def tolerance(self):
        return self._tolerance

    @tolerance.setter
    def tolerance(self, value):
        self._tolerance = value

    @property
    def lab_pos_1(self):
        return self._lab_pos_1

    @lab_pos_1.setter
    def lab_pos_1(self, value):
        self._lab_pos_1 = value

    @property
    def lab_pos_2(self):
        return self._lab_pos_2

    @lab_pos_2.setter
    def lab_pos_2(self, value):
        self._lab_pos_2 = value

    @property
    def hab_pos_1(self):
        return self._hab_pos_1

    @hab_pos_1.setter
    def hab_pos_1(self, value):
        self._hab_pos_1 = value

    @property
    def hab_pos_2(self):
        return self._hab_pos_2

    @hab_pos_2.setter
    def hab_pos_2(self, value):
        self._hab_pos_2 = value

    @property
    def component(self):
        return self._component

    @component.setter
    def component(self, value):
        self._component = value

    @property
    def update_hab(self):
        return self._update_hab

    @update_hab.setter
    def update_hab(self, value):
        self._update_hab = value

    @property
    def update_lab(self):
        return self._update_lab

    @update_lab.setter
    def update_lab(self, value):
        self._update_lab = value
コード例 #12
0
def stitch(data_list=None,
           q_min=None,
           q_max=None,
           output_workspace=None,
           scale=None,
           save_output=False):
    """
        Stitch together overlapping I(Q) data sets into one combined set.

        @param data_list: list of N data files or workspaces to stitch
        @param q_min: list of N-1 Qmin values of overlap regions
        @param q_max: list of N-1 Qmax values of overlap regions
        @param output_workspace: name of the output workspace for the combined data
        @param scale: single overall scaling factor, or N scaling factors (one for each data set)
        @param save_output: If True, the combined output will be saved as XML
    """
    # Avoid the original mutable default argument (data_list=[])
    if data_list is None:
        data_list = []

    # Sanity check: q_min and q_max can either both be None or both be
    # of length N-1 where N is the length of data_list
    if (q_min is not None and q_max is None) or \
       (q_max is not None and q_min is None):
        error_msg = "Both q_min and q_max parameters should be provided, not just one"
        Logger("data_stitching").error(error_msg)
        raise RuntimeError(error_msg)

    if not isinstance(data_list, list):
        error_msg = "The data_list parameter should be a list"
        Logger("data_stitching").error(error_msg)
        raise RuntimeError(error_msg)

    n_data_sets = len(data_list)
    if n_data_sets < 2:
        error_msg = "The data_list parameter should contain at least two data sets"
        Logger("data_stitching").error(error_msg)
        raise RuntimeError(error_msg)

    # Check whether we just need to scale the data sets using the provided
    # scaling factors
    has_scale_factors = False
    if isinstance(scale, list):
        if len(scale) == n_data_sets:
            has_scale_factors = True
        else:
            error_msg = "If the scale parameter is provided as a list, it should have the same length as data_list"
            Logger("data_stitching").error(error_msg)
            raise RuntimeError(error_msg)

    is_q_range_limited = False
    if q_min is not None and q_max is not None:
        is_q_range_limited = True
        # promote scalar Q limits to one-element lists
        if isinstance(q_min, (int, float)):
            q_min = [q_min]
        if isinstance(q_max, (int, float)):
            q_max = [q_max]

        if not isinstance(q_min, list) or not isinstance(q_max, list):
            error_msg = "The q_min and q_max parameters must be lists"
            Logger("data_stitching").error(error_msg)
            raise RuntimeError(error_msg)

        if not len(q_min) == n_data_sets - 1:
            error_msg = "The length of q_min must be 1 shorter than the length of data_list: q_min=%s" % str(
                q_min)
            Logger("data_stitching").error(error_msg)
            raise RuntimeError(error_msg)
        if not len(q_max) == n_data_sets - 1:
            error_msg = "The length of q_max must be 1 shorter than the length of data_list: q_max=%s" % str(
                q_max)
            Logger("data_stitching").error(error_msg)
            raise RuntimeError(error_msg)

        # Sanity check: every overlap bound must be convertible to float
        for i in range(n_data_sets - 1):
            try:
                q_min[i] = float(q_min[i])
                q_max[i] = float(q_max[i])
            except (TypeError, ValueError):
                error_msg = "The Q range parameters are invalid: q_min=%s   q_max=%s" % (
                    str(q_min), str(q_max))
                Logger("data_stitching").error(error_msg)
                raise RuntimeError(error_msg)
    else:
        q_min = (n_data_sets - 1) * [None]
        q_max = (n_data_sets - 1) * [None]

    # Prepare the data sets
    s = Stitcher()

    for i in range(n_data_sets):
        d = DataSet(data_list[i])
        d.load(True)
        # Set the Q range to be used to stitch: first set is clipped on the
        # right, middle sets on both sides, last set on the left only
        xmin, xmax = d.get_range()
        if is_q_range_limited:
            if i == 0:
                xmax = q_max[i]
            elif i < n_data_sets - 1:
                xmin = q_min[i - 1]
                xmax = q_max[i]
            elif i == n_data_sets - 1:
                xmin = q_min[i - 1]

        d.set_range(xmin, xmax)

        # Set the scale of the reference data as needed
        if has_scale_factors:
            d.set_scale(float(scale[i]))
        elif i == 0 and isinstance(scale, (int, float)):
            d.set_scale(scale)

        s.append(d)

    # Set the reference data (index of the data set in the workspace list)
    s.set_reference(0)
    if not has_scale_factors:
        s.compute()

    # Now that we have the scaling factors computed, simply apply them (not very pretty...)
    for i in range(n_data_sets):
        d = s.get_data_set(i)
        xmin, xmax = d.get_range()
        if i > 0:
            xmin = q_min[i - 1]
        if i < n_data_sets - 1:
            xmax = q_max[i]

        d.apply_scale(xmin, xmax)

    # Create combined output
    if output_workspace is not None:
        s.get_scaled_data(workspace=output_workspace)

    # Save output to a file
    if save_output:
        if output_workspace is None:
            output_workspace = "combined_scaled_Iq"
        s.save_combined(output_workspace + ".xml",
                        as_canSAS=True,
                        workspace=output_workspace)
コード例 #13
0
#     & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name,protected-access
from __future__ import (absolute_import, division, print_function)
import six
import os
from qtpy.QtWidgets import (QFrame, QGroupBox, QMessageBox)  # noqa
from qtpy.QtGui import (QDoubleValidator)  # noqa
import reduction_gui.widgets.util as util
from reduction_gui.reduction.sans.hfir_sample_script import SampleData
from reduction_gui.widgets.base_widget import BaseWidget
try:
    from mantidqt.utils.qt import load_ui
except ImportError:
    from mantid.kernel import Logger
    Logger("DirectBeam").information('Using legacy ui importer')
    from mantidplot import load_ui

if six.PY3:
    unicode = str


class DirectBeam(BaseWidget):
    """
        Widget for the direct beam transmission calculation options.
    """

    def __init__(self, parent=None, state=None, settings=None, data_type=None, data_proxy=None):
        super(DirectBeam, self).__init__(parent, state, settings, data_type, data_proxy=data_proxy)

        class DirectBeamFrame(QGroupBox):
コード例 #14
0
from qtpy.QtWidgets import (QAction, QDialog, QFileDialog, QMainWindow,
                            QMessageBox)  # noqa
from qtpy.QtCore import (QFile, QFileInfo, QSettings)  # noqa
from mantid.kernel import Logger

# Check whether Mantid is available
CAN_REDUCE = False
try:
    CAN_REDUCE = True
    from mantid.kernel import ConfigService
except ImportError:
    pass
try:
    from mantidqt.utils.qt import load_ui  # noqa
except ImportError:
    Logger("ReductionGUI").information('Using legacy ui importer')
    from mantidplot import load_ui  # noqa

unicode = str

STARTUP_WARNING = ""

if CAN_REDUCE:
    try:
        import reduction  # noqa

        if os.path.splitext(os.path.basename(
                reduction.__file__))[0] == "reduction":
            home_dir = os.path.expanduser('~')
            if os.path.abspath(reduction.__file__).startswith(home_dir):
                STARTUP_WARNING = "The following file is in your home area, please delete it and restart Mantid:\n\n"
コード例 #15
0
#
# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
#   NScD Oak Ridge National Laboratory, European Spallation Source,
#   Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from qtpy.QtWidgets import (QButtonGroup, QFrame)  # noqa
from qtpy.QtGui import (QIntValidator)  # noqa
from reduction_gui.widgets.base_widget import BaseWidget
from reduction_gui.reduction.inelastic.dgs_data_corrections_script import DataCorrectionsScript
import reduction_gui.widgets.util as util
try:
    from mantidqt.utils.qt import load_ui
except ImportError:
    from mantid.kernel import Logger
    Logger("DataCorrectionsWidget").information('Using legacy ui importer')
    from mantidplot import load_ui


class DataCorrectionsWidget(BaseWidget):
    """
        Widget that presents data correction options to the user.
    """
    ## Widget name
    name = "Data Corrections"

    _old_backgnd_sub = None
    _old_norm_button = None
    incident_beam_norm_grp = None

    def __init__(self, parent=None, state=None, settings=None, data_type=None):
コード例 #16
0
    def plotLogValue(self):
        """Plot the currently selected sample log against relative time.

        Reads the log chosen in ``comboBox_2`` from ``self._dataWS``,
        converts its absolute timestamps to seconds relative to the first
        ``proton_charge`` pulse, redraws the main plot and the slider/limit
        lines, shows the log-statistics labels, and suggests fast-log /
        parallel processing based on the number of log entries.
        """
        # Get the selected log name; empty selection means nothing to plot
        logname = str(self.ui.comboBox_2.currentText())
        if len(logname) == 0:
            return

        samplelog = self._dataWS.getRun().getProperty(logname)
        vectimes = samplelog.times
        vecvalue = samplelog.value

        # An empty log cannot be plotted: report and bail out instead of
        # falling through and crashing on min()/max() of empty sequences.
        if len(vectimes) == 0:
            Logger("Filter_Events").error("Empty log!")
            return

        # Convert absolute time to relative time in seconds, using the
        # first proton_charge pulse as t = 0
        t0 = self._dataWS.getRun().getProperty("proton_charge").times[0]
        t0ns = t0.totalNanoseconds()

        # Append one more point at the final proton_charge time so a log
        # with a single value still draws as a horizontal segment
        tf = self._dataWS.getRun().getProperty("proton_charge").times[-1]
        # NOTE(review): assumes samplelog.times behaves like a list here
        # (supports append) -- confirm against the Mantid API in use
        vectimes.append(tf)
        vecvalue = numpy.append(vecvalue, vecvalue[-1])

        vecreltimes = [
            float(t.totalNanoseconds() - t0ns) * 1.0E-9 for t in vectimes
        ]

        # Fit the axes to the data and update the main curve
        xlim = [min(vecreltimes), max(vecreltimes)]
        ylim = [min(vecvalue), max(vecvalue)]
        self.ui.mainplot.set_xlim(xlim[0], xlim[1])
        self.ui.mainplot.set_ylim(ylim[0], ylim[1])

        setp(self.mainline, xdata=vecreltimes, ydata=vecvalue)

        # Label the y-axis with the log name, and its unit when it has one
        samunit = samplelog.units
        if len(samunit) == 0:
            ylabel = logname
        else:
            ylabel = "%s (%s)" % (logname, samunit)
        self.ui.mainplot.set_ylabel(ylabel, fontsize=13)

        # assume that all logs are on almost same X-range.  Only Y need to be reset
        setp(self.leftslideline, ydata=ylim)
        setp(self.rightslideline, ydata=ylim)

        # Reset the log-value limit sliders/lines as the previous values no
        # longer make sense for the new log
        setp(self.lowerslideline, xdata=xlim, ydata=[ylim[0], ylim[0]])
        self._lowerSlideValue = 0
        self.ui.verticalSlider_2.setValue(self._lowerSlideValue)
        self.ui.lineEdit_5.setText("")

        setp(self.upperslideline, xdata=xlim, ydata=[ylim[1], ylim[1]])
        self._upperSlideValue = 100
        self.ui.verticalSlider.setValue(self._upperSlideValue)
        self.ui.lineEdit_6.setText("")

        self.ui.graphicsView.draw()

        # Load the property's statistics and give a suggestion on whether
        # parallel and/or fast-log processing is worthwhile
        timeavg = samplelog.timeAverageValue()
        numentries = samplelog.size()
        stat = samplelog.getStatistics()

        duration = stat.duration
        mean = stat.mean
        freq = float(numentries) / float(duration)

        self.ui.label_mean.show()
        self.ui.label_meanvalue.show()
        self.ui.label_avg.show()
        self.ui.label_timeAvgValue.show()
        self.ui.label_freq.show()
        self.ui.label_freqValue.show()
        self.ui.label_logname.show()
        self.ui.label_lognamevalue.show()
        self.ui.label_logsize.show()
        self.ui.label_logsizevalue.show()

        self.ui.label_meanvalue.setText("%.5e" % (mean))
        self.ui.label_timeAvgValue.setText("%.5e" % (timeavg))
        self.ui.label_freqValue.setText("%.5e" % (freq))
        self.ui.label_lognamevalue.setText(logname)
        self.ui.label_logsizevalue.setText(str(numentries))

        # Set the suggested processing scheme from the entry-count thresholds
        # NOTE(review): setCheckState normally takes a Qt.CheckState enum;
        # passing a bool relies on implicit conversion -- confirm
        if numentries > HUGE_FAST:
            self.ui.checkBox_fastLog.setCheckState(True)
            if numentries > HUGE_PARALLEL:
                self.ui.checkBox_doParallel.setCheckState(True)
            else:
                self.ui.checkBox_doParallel.setCheckState(False)
        else:
            self.ui.checkBox_fastLog.setCheckState(False)
            self.ui.checkBox_doParallel.setCheckState(False)
コード例 #17
0
import numpy as np
import os
from shutil import copyfile

from mantid.api import WorkspaceGroup
from mantid.kernel import Logger
from mantid.simpleapi import (CloneWorkspace, config, ConjoinWorkspaces,
                              DeleteWorkspace, Load, LoadEventNexus, LoadNexus,
                              LoadSampleDetailsFromRaw, mtd, Rebin,
                              RenameWorkspace, SaveNexusProcessed,
                              UnGroupWorkspace)
from SANSUtility import (AddOperation, transfer_special_sample_logs,
                         bundle_added_event_data_as_group, WorkspaceType,
                         get_workspace_type, getFileAndName)

# Shared logger for the SANS add-runs helpers in this module
sanslog = Logger("SANS")
# Sentinel period index (name suggests "process all periods together";
# usage is not visible in this chunk -- confirm against add_runs)
_NO_INDIVIDUAL_PERIODS = -1
# Names of the temporary workspaces (and their monitor counterparts) used
# while summing or assembling added event data
ADD_FILES_SUM_TEMPORARY = "AddFilesSumTemporary"
ADD_FILES_SUM_TEMPORARY_MONITORS = "AddFilesSumTemporary_monitors"
ADD_FILES_NEW_TEMPORARY = "AddFilesNewTemporary"
ADD_FILES_NEW_TEMPORARY_MONITORS = "AddFilesNewTemporary_monitors"


def add_runs(
        runs,  # noqa: C901
        inst='sans2d',
        defType='.nxs',
        rawTypes=('.raw', '.s*', 'add', '.RAW'),
        lowMem=False,
        binning='Monitors',
        saveAsEvent=False,
コード例 #18
0
#pylint: disable=bare-except,invalid-name
import sys
# Check whether Mantid is available
try:
    from mantid.api import AnalysisDataService
    from mantid.kernel import Logger
    logger = Logger("hfir_data_proxy")
    import mantid.simpleapi as api
    HAS_MANTID = True
except ImportError:
    HAS_MANTID = False


class DataProxy(object):
    """
        Class used to load a data file temporarily to extract header information:
        HFIR SANS Data files have the following properties (parsed from the data file!)
        "sample-detector-distance-offset"
        "sample-detector-distance"
        "sample-si-window-distance"
        "sample_detector_distance"
    """
    wavelength = None
    wavelength_spread = None
    sample_detector_distance = None
    sample_detector_distance_offset = None
    sample_si_window_distance = None
    # If it was moved before that's where the distance is:
    sample_detector_distance_moved = None
    data = None
    data_ws = ''
コード例 #19
0
class SANSFileInformation(metaclass=ABCMeta):
    """Abstract description of a SANS data file.

    Concrete subclasses provide instrument, facility, date, period count,
    geometry and mode information for a specific file format.  The IDF and
    IPF paths are resolved lazily on first access and then cached.
    """
    logger = Logger("SANS")

    def __init__(self, full_file_name):
        """
        :param full_file_name: absolute path of the data file this object
            describes; the run number is parsed from it immediately.
        """
        self._full_file_name = full_file_name

        # IDF and IPF file paths (resolved lazily, see get_idf_file_path)
        self._idf_file_path = None
        self._ipf_file_path = None

        self._run_number = self._init_run_number()

    def __eq__(self, other):
        # Value equality over all attributes for same-type instances.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; kept as-is since callers may rely on it.
        if type(other) is type(self):
            return self.__dict__ == other.__dict__
        return False

    @abstractmethod
    def get_file_name(self):
        pass

    @abstractmethod
    def get_instrument(self):
        pass

    @abstractmethod
    def get_facility(self):
        pass

    @abstractmethod
    def get_date(self):
        pass

    @abstractmethod
    def get_number_of_periods(self):
        pass

    @abstractmethod
    def get_type(self):
        pass

    @abstractmethod
    def is_event_mode(self):
        pass

    @abstractmethod
    def is_added_data(self):
        pass

    @abstractmethod
    def get_height(self):
        pass

    @abstractmethod
    def get_width(self):
        pass

    @abstractmethod
    def get_thickness(self):
        pass

    @abstractmethod
    def get_shape(self):
        pass

    def get_run_number(self):
        """Return the run number determined at construction time."""
        return self._run_number

    @abstractmethod
    def _get_run_number_from_file(self, file_name):
        pass

    def _init_run_number(self):
        """Determine the run number, preferring the filename over file contents.

        We don't use the nexus tagged file name as some instruments will take
        a file with the "right structure" and transplant data from another in,
        then rename the recipient to the donor name.
        """
        run_filename = os.path.basename(self._full_file_name)

        # Split down all digits into separate groups
        run_number_list = re.findall(r"\d+", run_filename)
        # Filter out any single digit numbers, such as SANS-2-Dxxxx
        run_number_list = [
            run_number for run_number in run_number_list if len(run_number) > 1
        ]

        # Assume the run number is the numerically largest value found.
        # Compare as integers: a plain max() over the digit strings would be
        # lexicographic (e.g. "999" > "10000") and pick the wrong candidate.
        run_number = max(run_number_list, key=int) if run_number_list else None

        if not run_number:
            # Fall back to whatever run number the file itself records
            run_number = self._get_run_number_from_file(self._full_file_name)
            self.logger.warning(
                "Could not parse run number from filename, using the run number direct set in the file which is {0}"
                .format(run_number))

        return int(run_number)

    def _load_instrument_paths(self):
        # Resolve and cache both the IDF and IPF paths in a single lookup,
        # since the resolver returns them together.
        idf_path, ipf_path = get_instrument_paths_for_sans_file(
            file_information=self)
        self._idf_file_path = idf_path
        self._ipf_file_path = ipf_path

    def get_idf_file_path(self):
        """Return the instrument definition file path (cached after first use)."""
        if self._idf_file_path is None:
            self._load_instrument_paths()
        return self._idf_file_path

    def get_ipf_file_path(self):
        """Return the instrument parameter file path (cached after first use)."""
        if self._ipf_file_path is None:
            self._load_instrument_paths()
        return self._ipf_file_path

    @staticmethod
    def get_full_file_name(file_name):
        """Resolve *file_name* to a full path via the SANS file finder."""
        return find_sans_file(file_name)
コード例 #20
0
 def __init__(self, view, exit_code, application='mantidplot'):
     """Wire this presenter to its error-report view.

     :param view: the error-report view; its report callback is bound here
     :param exit_code: the exit code being reported
     :param application: name of the application that produced the report
     """
     self._view = view
     self._exit_code = exit_code
     self._application = application
     self.error_log = Logger("error")
     # Route the view's report action through this presenter
     view.set_report_callback(self.error_handler)
コード例 #21
0
from __future__ import (absolute_import, division, print_function)
import os
from mantid.api import FileFinder
from sans.gui_logic.models.state_gui_model import StateGuiModel
from sans.gui_logic.presenter.gui_state_director import (GuiStateDirector)
from sans.user_file.user_file_reader import UserFileReader
from mantid.kernel import Logger
from sans.state.state import State

sans_logger = Logger("SANS")


def create_states(state_model,
                  table_model,
                  instrument,
                  facility,
                  row_index=None,
                  file_lookup=True):
    """
    Here we create the states based on the settings in the models
    :param state_model: the state model object
    :param table_model: the table model object
    :param row_index: the selected row, if None then all rows are generated
    """
    number_of_rows = table_model.get_number_of_rows()
    rows = [x for x in row_index if x < number_of_rows]

    states = {}
    errors = {}

    gui_state_director = GuiStateDirector(table_model, state_model, facility)
コード例 #22
0
ファイル: functions.py プロジェクト: luzpaz/mantid
from mantidqt.py3compat import is_text_string
from mantidqt.dialogs.spectraselectordialog import get_spectra_selection
from matplotlib.gridspec import GridSpec
import numpy as np

# local imports
from .figuretype import figure_type, FigureType

# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
PROJECTION = 'mantid'
# See https://matplotlib.org/api/_as_gen/matplotlib.figure.SubplotParams.html#matplotlib.figure.SubplotParams
SUBPLOT_WSPACE = 0.5
SUBPLOT_HSPACE = 0.5
LOGGER = Logger("workspace.plotting.functions")

# -----------------------------------------------------------------------------
# 'Public' Functions
# -----------------------------------------------------------------------------


def can_overplot():
    """
    Checks if overplotting on the current figure can proceed
    with the given options

    :return: A 2-tuple of boolean indicating compatability and
    a string containing an error message if the current figure is not
    compatible.
    """
コード例 #23
0
 def __init__(self, parent_presenter):
     """Create the presenter; the view is attached later, so start detached."""
     self._view = None
     self._parent_presenter = parent_presenter
     self._logger = Logger("SANS")
     self._work_handler = WorkHandler()
コード例 #24
0
def beam_center_gravitational_drop(beam_center_file, sdd=1.13):
    '''
    This method is used for correcting for gravitational drop
    @param beam_center_file :: file where the beam center was found
    @param sdd :: sample detector distance to apply the beam center
    @return (beam_center_x, new_beam_center_y) in pixels, or None if the
        beam-center workspace can neither be found in the ADS nor loaded
    '''
    def calculate_neutron_drop(path_length, wavelength):
        '''
        Calculate the gravitational drop of the neutrons
        path_length in meters
        wavelength in Angstrom
        '''
        wavelength *= 1e-10  # Angstrom -> meters
        neutron_mass = 1.674927211e-27  # kg
        gravity = 9.80665  # m/s^2
        h_planck = 6.62606896e-34  # J*s
        l_2 = (gravity * neutron_mass**2 /
               (2.0 * h_planck**2)) * path_length**2
        return wavelength**2 * l_2

    # Create the logger once instead of constructing it at every call site
    log = Logger("CommandInterface")

    # Get beam center used in the previous reduction
    pm = mantid.PropertyManagerDataService[
        ReductionSingleton().property_manager]
    beam_center_x = pm['LatestBeamCenterX'].value
    beam_center_y = pm['LatestBeamCenterY'].value
    log.information(
        "Beam Center before: [%.2f, %.2f] pixels" %
        (beam_center_x, beam_center_y))

    try:
        # check if the workspace still exists
        wsname = "__beam_finder_" + os.path.splitext(beam_center_file)[0]
        ws = mantid.mtd[wsname]
        log.debug("Using Workspace: %s." % (wsname))
    except KeyError:
        # Let's try loading the file. For some reason the beamcenter ws is not there...
        try:
            ws = Load(beam_center_file)
            log.debug("Using filename %s." % (beam_center_file))
        except IOError:
            log.error("Cannot read input file %s." %
                      beam_center_file)
            return

    i = ws.getInstrument()
    y_pixel_size_mm = i.getNumberParameter('y-pixel-size')[0]
    log.debug("Y Pixel size = %.2f mm" % y_pixel_size_mm)
    y_pixel_size = y_pixel_size_mm * 1e-3  # In meters
    distance_detector1 = i.getComponentByName("detector1").getPos()[2]
    path_length = distance_detector1 - sdd
    log.debug(
        "SDD detector1 = %.3f meters. SDD for wing = %.3f meters." %
        (distance_detector1, sdd))
    log.debug(
        "Path length for gravitational drop = %.3f meters." % (path_length))
    r = ws.run()
    wavelength = r.getProperty("wavelength").value
    log.debug("Wavelength = %.2f A." % (wavelength))

    drop = calculate_neutron_drop(path_length, wavelength)
    log.debug("Gravitational drop = %.6f meters." % (drop))
    # 1 pixel -> y_pixel_size
    # x pixel -> drop
    drop_in_pixels = drop / y_pixel_size
    new_beam_center_y = beam_center_y + drop_in_pixels
    log.information(
        "Beam Center after:   [%.2f, %.2f] pixels" %
        (beam_center_x, new_beam_center_y))
    return beam_center_x, new_beam_center_y
コード例 #25
0
ファイル: converterGUI.py プロジェクト: PeterParker/mantid
# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
#     NScD Oak Ridge National Laboratory, European Spallation Source
#     & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from qtpy.QtWidgets import QMainWindow, QMessageBox
from qtpy.QtGui import QDoubleValidator
from mantid.kernel import Logger
import math
import TofConverter.convertUnits

try:
    from mantidqt.utils.qt import load_ui
except ImportError:
    Logger("TofConverter").information('Using legacy ui importer')
    from mantidplot import load_ui


class MainWindow(QMainWindow):
    needsThetaInputList = [
        'Momentum transfer (Q Angstroms^-1)', 'd-spacing (Angstroms)'
    ]
    needsThetaOutputList = [
        'Momentum transfer (Q Angstroms^-1)', 'd-spacing (Angstroms)'
    ]
    needsFlightPathInputList = ['Time of flight (microseconds)']
    needsFlightPathOutputList = ['Time of flight (microseconds)']

    def thetaEnable(self, enabled):
        self.ui.scatteringAngleInput.setEnabled(enabled)
コード例 #26
0
ファイル: _aliases.py プロジェクト: yutiansut/mantid
        def __getitem__(self, item):
            return cls.__getattribute__(cls.Instance(), "__getitem__")(item)

        def __setitem__(self, item, value):
            return cls.__getattribute__(cls.Instance(), "__setitem__")(item,
                                                                       value)

        def __delitem__(self, item):
            return cls.__getattribute__(cls.Instance(), "__delitem__")(item)

        def __contains__(self, item):
            return cls.__getattribute__(cls.Instance(), "__contains__")(item)

    return LazySingletonHolder()


# Module-level accessors to the framework singletons.  The name
# lazy_instance_access suggests the underlying Instance() is only created
# on first use -- its definition is partially outside this chunk.
UsageService = lazy_instance_access(UsageServiceImpl)
ConfigService = lazy_instance_access(ConfigServiceImpl)
PropertyManagerDataService = lazy_instance_access(
    PropertyManagerDataServiceImpl)
UnitFactory = lazy_instance_access(UnitFactoryImpl)

# Convenience short aliases for the services above
config = ConfigService
pmds = PropertyManagerDataService

###############################################################################
# Set up a general Python logger. Others can be created as they are required
# if a user wishes to be more specific
###############################################################################
logger = Logger("Python")
コード例 #27
0
# Copyright &copy; 2020 ISIS Rutherford Appleton Laboratory UKRI,
#   NScD Oak Ridge National Laboratory, European Spallation Source,
#   Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=C0103
from mantidqtinterfaces.HFIR_4Circle_Reduction.hfctables import SinglePtIntegrationTable
from mantidqtinterfaces.HFIR_4Circle_Reduction.integratedpeakview import SinglePtIntegrationView
import mantidqtinterfaces.HFIR_4Circle_Reduction.guiutility as guiutility
import os
from qtpy.QtWidgets import (QMainWindow, QFileDialog)  # noqa
from qtpy.QtCore import Signal as pyqtSignal
from mantid.kernel import Logger
try:
    from mantidqt.utils.qt import load_ui
except ImportError:
    Logger("HFIR_4Circle_Reduction").information('Using legacy ui importer')
    from mantidplot import load_ui
from qtpy.QtWidgets import (QVBoxLayout)


class IntegrateSinglePtIntensityWindow(QMainWindow):
    """
    Main window widget to set up parameters to optimize
    """
    # establish signal for communicating from App2 to App1 - must be defined before the constructor
    scanIntegratedSignal = pyqtSignal(dict, name='SinglePtIntegrated')

    def __init__(self, parent=None):
        """
        Initialization
        :param parent:
コード例 #28
0
    def PyExec(self):
        """Run the iterative beam-centre finding routine.

        Repeatedly reduces the four quadrants of the sample (and, when
        supplied, can) workspaces at a candidate centre, compares the
        left/right and top/bottom residuals, and walks the centre until the
        step sizes fall below the tolerance or the iteration limit is hit.
        The best centre seen is written to the Centre1/Centre2 properties.
        """
        state = self._get_state()
        state_serialized = state.property_manager
        logger = Logger("CentreFinder")
        logger.notice("Starting centre finder routine...")
        progress = self._get_progress()
        # Scale factors applied when reporting/returning coordinates
        # (presumably a metres->mm conversion -- not confirmed here);
        # LARMOR's first axis is left unscaled below.
        self.scale_1 = 1000
        self.scale_2 = 1000
        verbose = self.getProperty('Verbose').value
        x_start = self.getProperty("Position1Start").value
        y_start = self.getProperty("Position2Start").value

        sample_scatter = self._get_cloned_workspace("SampleScatterWorkspace")
        sample_scatter_monitor = self._get_cloned_workspace("SampleScatterMonitorWorkspace")
        sample_transmission = self._get_cloned_workspace("SampleTransmissionWorkspace")
        sample_direct = self._get_cloned_workspace("SampleDirectWorkspace")

        instrument = sample_scatter.getInstrument()
        # LARMOR's first coordinate is not scaled
        if instrument.getName() == 'LARMOR':
            self.scale_1 = 1.0

        can_scatter = self._get_cloned_workspace("CanScatterWorkspace")
        can_scatter_monitor = self._get_cloned_workspace("CanScatterMonitorWorkspace")
        can_transmission = self._get_cloned_workspace("CanTransmissionWorkspace")
        can_direct = self._get_cloned_workspace("CanDirectWorkspace")

        component = self.getProperty("Component").value
        tolerance = self.getProperty("Tolerance").value
        max_iterations = self.getProperty("Iterations").value

        r_min = self.getProperty("RMin").value
        r_max = self.getProperty("RMax").value

        # Step sizes come from the instrument parameter file; if no
        # second-axis step is defined, reuse the first-axis step.
        instrument_file = get_instrument_paths_for_sans_file(state.data.sample_scatter)
        position_1_step = get_named_elements_from_ipf_file(
            instrument_file[1], ["centre-finder-step-size"], float)['centre-finder-step-size']
        # pre-existing bare except: any failure falls back to the first step
        try:
            position_2_step = get_named_elements_from_ipf_file(
                instrument_file[1], ["centre-finder-step-size2"], float)['centre-finder-step-size2']
        except:
            position_2_step = position_1_step

        # Restrict the search to one axis when a direction was requested
        find_direction = self.getProperty("Direction").value
        if find_direction == FindDirectionEnum.to_string(FindDirectionEnum.Left_Right):
            position_2_step = 0.0
        elif find_direction == FindDirectionEnum.to_string(FindDirectionEnum.Up_Down):
            position_1_step = 0.0
        centre1 = x_start
        centre2 = y_start
        residueLR = []  # left/right residual, one entry per iteration
        residueTB = []  # top/bottom residual, one entry per iteration
        # Best centre seen so far; returned if we run out of iterations
        centre_1_hold = x_start
        centre_2_hold = y_start
        for j in range(0, max_iterations + 1):
            # Iteration 0 evaluates the starting position without moving
            if(j != 0):
                centre1 += position_1_step
                centre2 += position_2_step

            progress.report("Reducing ... Pos1 " + str(centre1) + " Pos2 " + str(centre2))
            sample_quartiles = self._run_quartile_reduction(sample_scatter, sample_transmission, sample_direct,
                                                            "Sample", sample_scatter_monitor, component,
                                                            state_serialized, centre1, centre2, r_min, r_max)

            # Subtract the can reduction quadrant-by-quadrant when provided
            if can_scatter:
                can_quartiles = self._run_quartile_reduction(can_scatter, can_transmission, can_direct, "Can",
                                                             can_scatter_monitor, component, state_serialized, centre1,
                                                             centre2, r_min, r_max)
                for key in sample_quartiles:
                    sample_quartiles[key] = perform_can_subtraction(sample_quartiles[key], can_quartiles[key], self)

            if mantidplot:
                output_workspaces = self._publish_to_ADS(sample_quartiles)
                if verbose:
                    self._rename_and_group_workspaces(j, output_workspaces)

            residueLR.append(self._calculate_residuals(sample_quartiles[MaskingQuadrant.Left],
                                                       sample_quartiles[MaskingQuadrant.Right]))
            residueTB.append(self._calculate_residuals(sample_quartiles[MaskingQuadrant.Top],
                                                       sample_quartiles[MaskingQuadrant.Bottom]))
            if(j == 0):
                logger.notice("Itr " + str(j) + ": (" + str(self.scale_1 * centre1) + ", " + str(self.scale_2 * centre2) + ")  SX="
                              + str(residueLR[j]) + "  SY=" + str(residueTB[j]))
                if mantidplot:
                    self._plot_quartiles(output_workspaces, state.data.sample_scatter)

            else:
                # have we stepped across the y-axis that goes through the beam center?
                if residueLR[j] > residueLR[j-1]:
                    # yes with stepped across the middle, reverse direction and half the step size
                    position_1_step = - position_1_step / 2
                if residueTB[j] > residueTB[j-1]:
                    position_2_step = - position_2_step / 2

                logger.notice("Itr " + str(j) + ": (" + str(self.scale_1 * centre1) + ", " + str(self.scale_2 * centre2) + ")  SX="
                              + str(residueLR[j]) + "  SY=" + str(residueTB[j]))

                # Keep this centre when the combined residual improved, or
                # unconditionally in compatibility mode
                if (residueLR[j]+residueTB[j]) < (residueLR[j-1]+residueTB[j-1]) or state.compatibility.use_compatibility_mode:
                    centre_1_hold = centre1
                    centre_2_hold = centre2

                if abs(position_1_step) < tolerance and abs(position_2_step) < tolerance:
                    # this is the success criteria, we've close enough to the center
                    logger.notice("Converged - check if stuck in local minimum! ")
                    break

            if j == max_iterations:
                logger.notice("Out of iterations, new coordinates may not be the best")

        self.setProperty("Centre1", centre_1_hold)
        self.setProperty("Centre2", centre_2_hold)

        logger.notice("Centre coordinates updated: [{}, {}]".format(centre_1_hold*self.scale_1, centre_2_hold*self.scale_2))
コード例 #29
0
ファイル: scripter.py プロジェクト: gemmaguest/mantid
 def on_error(arg):
     """Log a script-execution failure through the 'scripter' logger."""
     message = 'Failed to execute script: {}'.format(arg)
     Logger('scripter').error(message)
コード例 #30
0
 def __init__(self, view, exit_code):
     """Bind this handler to *view* and remember the reported exit code."""
     self._view = view
     self._exit_code = exit_code
     self.error_log = Logger("error")
     # Invoke error_handler whenever the view's action fires
     view.action.connect(self.error_handler)