示例#1
0
    def convert(self):
        """Read the input value and unit choices from the UI, run the TOF
        unit conversion, and display the result in the converted-value box.

        Expected input problems (empty/non-positive/non-numeric values) are
        reported via a warning dialog; anything unexpected is only logged.
        Never raises to the caller.
        """
        #Always reset these values before conversion.
        self.Theta = None
        self.flightpath = None
        try:
            if self.ui.InputVal.text() == "":
                raise RuntimeError("Input value is required for conversion")
            if float(self.ui.InputVal.text()) <= 0:
                raise RuntimeError(
                    "Input value must be greater than 0 for conversion")
            inOption = self.ui.inputUnits.currentText()
            outOption = self.ui.outputUnits.currentText()
            # Flight path is optional; -1.0 marks "not provided" downstream.
            if self.ui.totalFlightPathInput.text():
                self.flightpath = float(self.ui.totalFlightPathInput.text())
            else:
                self.flightpath = -1.0
            # Scattering angle is optional; degrees * pi / 360 equals half the
            # angle in radians, i.e. Theta is the half scattering angle.
            if self.ui.scatteringAngleInput.text():
                self.Theta = float(
                    self.ui.scatteringAngleInput.text()) * math.pi / 360.0
            else:
                self.Theta = -1.0

            # The raw text is passed through; doConversion presumably handles
            # the string-to-number conversion itself — TODO confirm.
            self.output = TofConverter.convertUnits.doConversion(
                self.ui.InputVal.text(), inOption, outOption, self.Theta,
                self.flightpath)

            self.ui.convertedVal.clear()
            self.ui.convertedVal.insert(str(self.output))
        except (UnboundLocalError, ArithmeticError, ValueError,
                RuntimeError) as err:
            # Expected, user-correctable failures: show a warning dialog.
            QMessageBox.warning(self, "TofConverter", str(err))
            return
        except Exception as exc:
            # Unexpected failures are only logged, not shown to the user.
            Logger.error(exc)
            return
示例#2
0
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog.

    Sends an error report to the server when the user opts to share
    information, then quits or resumes the application per the user's choice.

    :param view: The error-report dialog; must expose ``action`` and ``quit``.
    :param exit_code: Exit code of the crashed application, forwarded to the
        error report.
    """

    def __init__(self, view, exit_code):
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.action.connect(self.error_handler)

    def error_handler(self, continue_working, share, name, email):
        """Send an error report according to the user's sharing choice.

        ``share == 0`` shares identifiable information (name/email);
        ``share == 1`` shares non-identifiable information only; any other
        value sends nothing. Afterwards quit or continue working according
        to ``continue_working``.
        """
        # The two sharing modes differed only in the 'identifiable' boolean,
        # so build the reporter once instead of duplicating the whole call.
        if share in (0, 1):
            errorReporter = ErrorReporter("mantidplot",
                                          UsageService.getUpTime(),
                                          self._exit_code, share == 0,
                                          str(name), str(email))
            errorReporter.sendErrorReport()

        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def show_view(self):
        """Show the error-report dialog."""
        self._view.show()
示例#3
0
    def test_unicode_logger(self):
        """Check that Logger exposes the standard level methods and that each
        accepts a plain text message."""
        logger = Logger("LoggerTest")
        self.assertTrue(isinstance(logger, Logger))
        level_names = ('fatal', 'error', 'warning', 'notice', 'information', 'debug')
        for level in level_names:
            if not hasattr(logger, level):
                self.fail("Logger object does not have the required attribute '%s'" % level)

        message = 'This is a test'
        for level in level_names:
            getattr(logger, level)(message)
示例#4
0
    def test_unicode_logger(self):
        """Verify the Logger type provides every standard logging level."""
        logger = Logger("LoggerTest")
        self.assertTrue(isinstance(logger, Logger))
        missing = [name for name in
                   ('fatal', 'error', 'warning', 'notice', 'information', 'debug')
                   if not hasattr(logger, name)]
        if missing:
            self.fail("Logger object does not have the required attribute '%s'" % missing[0])

        logger.fatal('This is a test')
        logger.error('This is a test')
        logger.warning('This is a test')
        logger.notice('This is a test')
        logger.information('This is a test')
        logger.debug('This is a test')
示例#5
0
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog.

    Sends a report to the error-report server when the user opts to share
    information, warns the user on a failed upload, then quits or resumes
    the application per the user's choice.
    """

    def __init__(self, view, exit_code):
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.action.connect(self.error_handler)

    def error_handler(self, continue_working, share, name, email, textBox):
        """Send a report according to the sharing choice and handle exit.

        ``share == 0`` shares identifiable information (name/email/free
        text); ``share == 1`` shares non-identifiable information; anything
        else sends nothing.
        """
        status = None
        if share in (0, 1):
            # The two sharing modes differ only in the 'identifiable' flag.
            errorReporter = ErrorReporter(
                "mantidplot", UsageService.getUpTime(), self._exit_code,
                share == 0, str(name), str(email), str(textBox))
            status = errorReporter.sendErrorReport()

        # Only complain about the server when a send was actually attempted.
        # Previously a "do not share" choice left a sentinel status of -1 and
        # showed a bogus "Error contacting server ... status -1" dialog.
        if status is not None and status != 201:
            self._view.display_message_box('Error contacting server','There was an error when sending the report.'
                                           'Please contact [email protected] directly',
                                           'http request returned with status {}'.format(status))
            self.error_log.error("Failed to send error report http request returned status {}".format(status))

        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def show_view(self):
        """Show the error-report dialog."""
        self._view.show()
示例#6
0
    def test_unicode_logger(self):
        """Exercise every logging level plus the flush/accumulate/purge API."""
        logger = Logger("LoggerTest")
        self.assertTrue(isinstance(logger, Logger))
        required_attrs = (
            'fatal', 'error', 'warning', 'notice', 'information', 'debug',
            'flush', 'purge', 'accumulate', 'flushDebug',
            'flushInformation', 'flushNotice', 'flushWarning',
            'flushError', 'flushFatal'
        )
        for required in required_attrs:
            if not hasattr(logger, required):
                self.fail(
                    "Logger object does not have the required attribute '%s'" %
                    required)

        message = 'This is a test'
        for level in ('fatal', 'error', 'warning', 'notice', 'information',
                      'debug'):
            getattr(logger, level)(message)

        # Accumulate messages and flush them at each level in turn.
        logger.purge()
        logger.accumulate('one')
        logger.accumulate('two')
        logger.flush()
        flush_sequence = (('three', logger.flushDebug),
                          ('four', logger.flushInformation),
                          ('five', logger.flushNotice),
                          ('six', logger.flushWarning),
                          ('seven', logger.flushError),
                          ('eight', logger.flushFatal))
        for text, flush in flush_sequence:
            logger.accumulate(text)
            flush()
        logger.purge()
示例#7
0
class SettingsDiagnosticPresenter(object):
    """Presenter for the SANS settings-diagnostic tab.

    Shows the fully-resolved SANS state for the currently selected table row
    as a JSON tree and allows saving that state to a JSON file.
    """

    class ConcreteSettingsDiagnosticTabListener(
            SettingsDiagnosticTab.SettingsDiagnosticTabListener):
        """Forwards settings-diagnostic view events to the owning presenter."""

        def __init__(self, presenter):
            super(
                SettingsDiagnosticPresenter.
                ConcreteSettingsDiagnosticTabListener, self).__init__()
            self._presenter = presenter

        def on_row_changed(self):
            self._presenter.on_row_changed()

        def on_update_rows(self):
            self._presenter.on_update_rows()

        def on_collapse(self):
            self._presenter.on_collapse()

        def on_expand(self):
            self._presenter.on_expand()

        def on_save_state_to_file(self):
            self._presenter.on_save_state()

    def __init__(self, parent_presenter):
        self._view = None
        self._parent_presenter = parent_presenter
        # Logger
        self.gui_logger = Logger("SANS GUI LOGGER")

    def on_collapse(self):
        self._view.collapse()

    def on_expand(self):
        self._view.expand()

    def on_row_changed(self):
        """Display the state tree for the newly selected row.

        A RuntimeError (e.g. files not found while resolving the state) is
        logged and reported via a warning box rather than propagated.
        """
        try:
            row_index = self._view.get_current_row()
            state = self.get_state(row_index)
            if state:
                self.display_state_diagnostic_tree(state)
        except RuntimeError as e:
            self.gui_logger.error(str(e))
            self._parent_presenter.display_warning_box(
                'Warning', 'Unable to find files.', str(e))

    def on_update_rows(self):
        """
        Update the row selection in the combobox
        """
        current_row_index = self._view.get_current_row()
        valid_row_indices = self._parent_presenter.get_row_indices()

        # Keep the current selection when still valid, otherwise fall back
        # to the first valid row (or none at all).
        new_row_index = -1
        if current_row_index in valid_row_indices:
            new_row_index = current_row_index
        elif len(valid_row_indices) > 0:
            new_row_index = valid_row_indices[0]

        self._view.update_rows(valid_row_indices)

        if new_row_index != -1:
            self.set_row(new_row_index)
            self.on_row_changed()

    def set_row(self, index):
        self._view.set_row(index)

    def set_view(self, view):
        """Attach the view, register the event listener and reset the GUI."""
        if view:
            self._view = view

            # Set up row selection listener
            listener = SettingsDiagnosticPresenter.ConcreteSettingsDiagnosticTabListener(
                self)
            self._view.add_listener(listener)

            # Set the default gui
            self._set_default_gui()

    def _set_default_gui(self):
        self._view.update_rows([])
        self.display_state_diagnostic_tree(state=None)

    def get_state(self, index) -> AllStates:
        """Return the resolved SANS state for the given table row."""
        return self._parent_presenter.get_state_for_row(index)

    def display_state_diagnostic_tree(self, state):
        """Render the given state (or clear the tree when state is falsy)."""
        # Convert to dict before passing the state to the view
        dict_vals = None

        if state:
            state = Serializer.to_json(state)
            dict_vals = json.loads(
                state
            )  # We intentionally do not use serializer to get a dict type back

        self._view.set_tree(dict_vals)

    def on_save_state(self):
        """Serialize the selected row's state to the JSON file chosen in the
        view, then reflect the final file name back into the UI."""
        UsageService.registerFeatureUsage(
            FeatureType.Feature,
            ["ISIS SANS", "Settings Diagnostics - Save JSON"], False)
        # Get the save location
        save_location = self._view.get_save_location()
        # Check if it exists
        path_dir = os.path.dirname(save_location)
        if not path_dir:
            # Bug fix: corrected the typo "validate path" in the warning text.
            self.gui_logger.warning(
                "The provided save location for the SANS state does not seem to exist. "
                "Please provide a valid path")
            return

        file_name, _ = os.path.splitext(save_location)
        full_file_path = file_name + JSON_SUFFIX

        row_index = self._view.get_current_row()
        state = self.get_state(row_index)
        Serializer.save_file(state, full_file_path)
        self.gui_logger.information(
            "The state for row {} has been saved to: {} ".format(
                row_index, full_file_path))

        # Update the file name in the UI
        self._view.set_save_location(full_file_path)
示例#8
0
class BeamCentreAsync(IQtAsync):
    """Runs the SANS beam-centre finder as an asynchronous Qt task and routes
    success/finished/error callbacks back to the owning presenter."""

    def __init__(self, parent_presenter: 'BeamCentrePresenter'):
        super().__init__()
        self._parent_presenter = parent_presenter
        self._logger = Logger("CentreFinder")

    def success_cb_slot(self, result: AsyncTaskSuccess) -> None:
        # Forward the found centre values to the presenter for display.
        self._parent_presenter.on_update_centre_values(result.output)

    def finished_cb_slot(self) -> None:
        self._parent_presenter.on_processing_finished_centre_finder()

    def error_cb_slot(self, result: AsyncTaskFailure) -> None:
        self._parent_presenter.on_processing_error_centre_finder(str(result))

    @qt_async_task
    def find_beam_centre(self, state: AllStates,
                         settings: BeamCentreFields) -> Optional[Dict]:
        """
        This is called from the GUI and runs the find beam centre algorithm given a state model and a beam_centre_model object.

        Returns None (after logging an error) when no find direction was
        chosen in the settings.

        :param state: A SANS state object
        :param settings: A class containing relevant fields for the beam settings
        :returns: The centre position found.
        """
        centre_finder = SANSCentreFinder()
        if not settings.find_direction:
            self._logger.error("Have chosen no find direction exiting early")
            return

        # Start from the positions of the selected detector bank.
        pos_1 = settings.lab_pos_1 if settings.component is DetectorType.LAB else settings.hab_pos_1
        pos_2 = settings.lab_pos_2 if settings.component is DetectorType.LAB else settings.hab_pos_2

        if settings.centre_of_mass:
            # First pass with reduction_method=False: obtain an initial
            # centre estimate from the user-supplied starting position.
            centre = centre_finder(state,
                                   r_min=settings.r_min,
                                   r_max=settings.r_max,
                                   max_iter=settings.max_iterations,
                                   x_start=pos_1,
                                   y_start=pos_2,
                                   tolerance=settings.tolerance,
                                   find_direction=settings.find_direction,
                                   reduction_method=False,
                                   component=settings.component)

            # Second pass with reduction_method=True: refine the first
            # pass's result ('pos1'/'pos2') rather than the raw start.
            centre = centre_finder(state,
                                   r_min=settings.r_min,
                                   r_max=settings.r_max,
                                   max_iter=settings.max_iterations,
                                   x_start=centre['pos1'],
                                   y_start=centre['pos2'],
                                   tolerance=settings.tolerance,
                                   find_direction=settings.find_direction,
                                   reduction_method=True,
                                   verbose=settings.verbose,
                                   component=settings.component)
        else:
            # Single reduction-based search straight from the start position.
            centre = centre_finder(state,
                                   r_min=settings.r_min,
                                   r_max=settings.r_max,
                                   max_iter=settings.max_iterations,
                                   x_start=pos_1,
                                   y_start=pos_2,
                                   tolerance=settings.tolerance,
                                   find_direction=settings.find_direction,
                                   reduction_method=True,
                                   verbose=settings.verbose,
                                   component=settings.component)

        return centre
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog.

    Depending on the user's choice it sends either a fully identifiable or a
    non-identifiable error report (plus, in the identifiable case, the zipped
    project-recovery data), then quits or resumes the application.
    """

    def __init__(self, view, exit_code):
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.set_report_callback(self.error_handler)

    def do_not_share(self, continue_working=True):
        """Share nothing; return -1 as a sentinel (no HTTP request made)."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send a report without name/email/free text; return the HTTP status."""
        uptime = UsageService.getUpTime()
        status = self._send_report_to_server(share_identifiable=False, uptime=uptime)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return status

    def share_all_information(self, continue_working, name, email, text_box):
        """Send an identifiable report and, on success (201), upload the
        zipped recovery directory; return the report's HTTP status."""
        uptime = UsageService.getUpTime()
        try:
            recovery_archive, file_hash = zip_recovery_directory()
        except Exception as exc:
            # Bug fix: the exception is now interpolated into the message;
            # previously the '{}' placeholder was left unfilled.
            self.error_log.information(
                "Error creating recovery archive: {}. No recovery information will be sent".format(str(exc)))
            recovery_archive, file_hash = None, ""
        status = self._send_report_to_server(share_identifiable=True, uptime=uptime, name=name, email=email, file_hash=file_hash,
                                             text_box=text_box)
        self.error_log.notice("Sent full information")
        if status == 201 and recovery_archive:
            self._upload_recovery_file(recovery_archive=recovery_archive)
            try:
                os.remove(recovery_archive)
            except OSError as exc:
                self.error_log.information("Unable to remove zipped recovery information: {}".format(str(exc)))

        self._handle_exit(continue_working)
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch on the sharing choice: 0=all, 1=non-identifiable, 2=none."""
        if share == 0:
            status = self.share_all_information(continue_working, name, email, text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(continue_working)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            status = -2
            self.error_log.error("Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)

        return status

    def _handle_exit(self, continue_working):
        """Quit the view or keep working, logging the decision either way."""
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _upload_recovery_file(self, recovery_archive):
        """POST the zipped recovery directory to the error-report server."""
        url = ConfigService['errorreports.rooturl']
        url = '{}/api/recovery'.format(url)
        # Bug fix: the file handle was previously opened and never closed.
        with open(recovery_archive, 'rb') as recovery_file:
            response = requests.post(url, files={'file': recovery_file})
        if response.status_code == 201:
            self.error_log.notice("Uploaded recovery file to server. HTTP response {}".format(response.status_code))
        else:
            self.error_log.error("Failed to send recovery data HTTP response {}".format(response.status_code))

    def _send_report_to_server(self, share_identifiable=False, name='', email='', file_hash='', uptime='', text_box=''):
        """Build an ErrorReporter, send it, and warn the user on non-201."""
        errorReporter = ErrorReporter(
            "mantidplot", uptime, self._exit_code, share_identifiable, str(name), str(email), str(text_box),
            str(file_hash))
        status = errorReporter.sendErrorReport()

        if status != 201:
            self._view.display_message_box('Error contacting server', 'There was an error when sending the report.'
                                                                      'Please contact [email protected] directly',
                                           'http request returned with status {}'.format(status))
            self.error_log.error("Failed to send error report http request returned status {}".format(status))

        return status

    def show_view(self):
        """Show the error-report dialog."""
        self._view.show()
示例#10
0
class MaskingTablePresenter(object):
    """Presenter for the SANS masking table.

    Builds a human-readable list of every mask defined in a SANS state
    (radius, spectrum, strip, block, time, arm, phi and mask-file entries)
    and can load and display the masked workspace asynchronously.
    """

    DISPLAY_WORKSPACE_NAME = "__sans_mask_display_dummy_workspace"

    class ConcreteMaskingTableListener(MaskingTable.MaskingTableListener):
        """Forwards masking-table view events to the owning presenter."""

        def __init__(self, presenter):
            super(MaskingTablePresenter.ConcreteMaskingTableListener, self).__init__()
            self._presenter = presenter

        def on_row_changed(self):
            pass

        def on_update_rows(self):
            self._presenter.on_update_rows()

        def on_display(self):
            self._presenter.on_display()

    class DisplayMaskListener(WorkHandler.WorkListener):
        """Receives completion/error callbacks from the mask-display task."""

        def __init__(self, presenter):
            super(MaskingTablePresenter.DisplayMaskListener, self).__init__()
            self._presenter = presenter

        def on_processing_finished(self, result):
            self._presenter.on_processing_finished_masking_display(result)

        def on_processing_error(self, error):
            self._presenter.on_processing_error_masking_display(error)

    def __init__(self, parent_presenter):
        self._view = None
        self._parent_presenter = parent_presenter
        self._work_handler = WorkHandler()
        self._logger = Logger("SANS")

    def on_display(self):
        """Load and mask the workspace for the selected row asynchronously."""
        # Get the state information for the selected row.
        # Disable the button
        self._view.set_display_mask_button_to_processing()
        try:
            row_index = self._view.get_current_row()
            state = self.get_state(row_index)
        except Exception as e:
            self.on_processing_error_masking_display(e)
            raise e  # propagate errors for run_tab_presenter to deal with

        if not state:
            # Bug fix: the two string fragments previously joined without a
            # space, producing "...loaded and avalid sample...".
            self._logger.error("You can only show a masked workspace if a user file has been loaded and a "
                               "valid sample scatter entry has been provided in the selected row.")
            self._view.set_display_mask_button_to_normal()
            return

        # Run the task
        self.display_masking_information(state)
        listener = MaskingTablePresenter.DisplayMaskListener(self)
        state_copy = copy.copy(state)
        self._work_handler.process(listener, load_and_mask_workspace, 0, state_copy, self.DISPLAY_WORKSPACE_NAME)

    def on_processing_finished_masking_display(self, result):
        """Re-enable the display button and show the masked workspace."""
        # Enable button
        self._view.set_display_mask_button_to_normal()

        # Display masked workspace
        self._display(result)

    def on_processing_error_masking_display(self, error):
        """Log the failure and re-enable the display button."""
        self._logger.warning("There has been an error. See more: {}".format(error))
        # Enable button
        self._view.set_display_mask_button_to_normal()

    def on_processing_error(self, error):
        pass

    def on_update_rows(self):
        """
        Update the row selection in the combobox
        """
        current_row_index = self._view.get_current_row()
        valid_row_indices = self._parent_presenter.get_row_indices()

        # Keep the current selection when still valid, otherwise fall back
        # to the first valid row (or none at all).
        new_row_index = -1
        if current_row_index in valid_row_indices:
            new_row_index = current_row_index
        elif len(valid_row_indices) > 0:
            new_row_index = valid_row_indices[0]

        self._view.update_rows(valid_row_indices)

        if new_row_index != -1:
            self.set_row(new_row_index)

    def set_row(self, index):
        self._view.set_row(index)

    def set_view(self, view):
        """Attach the view, register the event listener and reset the GUI."""
        if view:
            self._view = view

            # Set up row selection listener
            listener = MaskingTablePresenter.ConcreteMaskingTableListener(self)
            self._view.add_listener(listener)

            # Set the default gui
            self._set_default_gui()

    def _set_default_gui(self):
        self._view.update_rows([])
        self.display_masking_information(state=None)

    def get_state(self, index, file_lookup=True, suppress_warnings=False):
        """Return the resolved SANS state for the given table row."""
        return self._parent_presenter.get_state_for_row(index, file_lookup=file_lookup,
                                                        suppress_warnings=suppress_warnings)

    @staticmethod
    def _append_single_spectrum_mask(spectrum_mask, container, detector_name, prefix):
        """Append one 'Spectrum' entry per masked spectrum, e.g. 'V5'."""
        if spectrum_mask:
            for item in spectrum_mask:
                detail = prefix + str(item)
                container.append(masking_information(first="Spectrum", second=detector_name, third=detail))

    @staticmethod
    def _append_strip_spectrum_mask(strip_mask_start, strip_mask_stop, container, detector_name, prefix):
        """Append one 'Strip' entry per start/stop pair, e.g. 'V3>V7'."""
        if strip_mask_start and strip_mask_stop:
            for start, stop in zip(strip_mask_start, strip_mask_stop):
                detail = prefix + str(start) + ">" + prefix + str(stop)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _append_block_spectrum_mask(horizontal_mask_start, horizontal_mask_stop, vertical_mask_start,
                                    vertical_mask_stop, container, detector_name):
        """Append one block entry per H/V range quadruple."""
        if horizontal_mask_start and horizontal_mask_stop and vertical_mask_start and vertical_mask_stop:
            for h_start, h_stop, v_start, v_stop in zip(horizontal_mask_start, horizontal_mask_stop,
                                                        vertical_mask_start, vertical_mask_stop):
                detail = "H{}>H{}+V{}>V{}".format(h_start, h_stop, v_start, v_stop)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _append_spectrum_block_cross_mask(horizontal_mask, vertical_mask, container, detector_name):
        """Append one cross-block entry per H/V pair, e.g. 'H2+V4'."""
        if horizontal_mask and vertical_mask:
            for h, v in zip(horizontal_mask, vertical_mask):
                detail = "H{}+V{}".format(h, v)
                container.append(masking_information(first="Strip", second=detector_name, third=detail))

    @staticmethod
    def _get_spectrum_masks(mask_detector_info):
        """Collect all spectrum-type masks for one detector."""
        detector_name = mask_detector_info.detector_name
        spectrum_masks = []

        # -------------------------------
        # Get the vertical spectrum masks
        # -------------------------------
        single_vertical_strip_mask = mask_detector_info.single_vertical_strip_mask
        MaskingTablePresenter._append_single_spectrum_mask(single_vertical_strip_mask, spectrum_masks,
                                                           detector_name, "V")

        range_vertical_strip_start = mask_detector_info.range_vertical_strip_start
        range_vertical_strip_stop = mask_detector_info.range_vertical_strip_stop
        MaskingTablePresenter._append_strip_spectrum_mask(range_vertical_strip_start,
                                                          range_vertical_strip_stop,
                                                          spectrum_masks, detector_name, "V")

        # ---------------------------------
        # Get the horizontal spectrum masks
        # ---------------------------------
        single_horizontal_strip_mask = mask_detector_info.single_horizontal_strip_mask
        MaskingTablePresenter._append_single_spectrum_mask(single_horizontal_strip_mask, spectrum_masks,
                                                           detector_name, "H")

        range_horizontal_strip_start = mask_detector_info.range_horizontal_strip_start
        range_horizontal_strip_stop = mask_detector_info.range_horizontal_strip_stop
        MaskingTablePresenter._append_strip_spectrum_mask(range_horizontal_strip_start,
                                                          range_horizontal_strip_stop,
                                                          spectrum_masks, detector_name, "H")

        # ---------------------------------
        # Get the block masks
        # ---------------------------------
        block_horizontal_start = mask_detector_info.block_horizontal_start
        block_horizontal_stop = mask_detector_info.block_horizontal_stop
        block_vertical_start = mask_detector_info.block_vertical_start
        block_vertical_stop = mask_detector_info.block_vertical_stop
        MaskingTablePresenter._append_block_spectrum_mask(block_horizontal_start, block_horizontal_stop,
                                                          block_vertical_start, block_vertical_stop,
                                                          spectrum_masks, detector_name)

        block_cross_horizontal = mask_detector_info.block_cross_horizontal
        block_cross_vertical = mask_detector_info.block_cross_vertical
        MaskingTablePresenter._append_spectrum_block_cross_mask(block_cross_horizontal, block_cross_vertical,
                                                                spectrum_masks, detector_name)

        # ---------------------------------
        # Get spectrum masks
        # ---------------------------------
        single_spectra = mask_detector_info.single_spectra
        MaskingTablePresenter._append_single_spectrum_mask(single_spectra, spectrum_masks,
                                                           detector_name, "S")

        spectrum_range_start = mask_detector_info.spectrum_range_start
        spectrum_range_stop = mask_detector_info.spectrum_range_stop
        MaskingTablePresenter._append_strip_spectrum_mask(spectrum_range_start,
                                                          spectrum_range_stop,
                                                          spectrum_masks, detector_name, "S")

        return spectrum_masks

    @staticmethod
    def _get_time_masks_general(mask_info):
        """Collect detector-independent time (bin) masks."""
        container = []
        bin_mask_general_start = mask_info.bin_mask_general_start
        bin_mask_general_stop = mask_info.bin_mask_general_stop
        if bin_mask_general_start and bin_mask_general_stop:
            for start, stop in zip(bin_mask_general_start, bin_mask_general_stop):
                detail = "{}-{}".format(start, stop)
                container.append(masking_information(first="Time", second="", third=detail))
        return container

    @staticmethod
    def _get_time_masks(mask_info):
        """Collect time (bin) masks for one detector."""
        container = []
        bin_mask_start = mask_info.bin_mask_start
        bin_mask_stop = mask_info.bin_mask_stop
        detector_name = mask_info.detector_name
        if bin_mask_start and bin_mask_stop:
            for start, stop in zip(bin_mask_start, bin_mask_stop):
                detail = "{}-{}".format(start, stop)
                container.append(masking_information(first="Time", second=detector_name, third=detail))
        return container

    @staticmethod
    def _get_arm_mask(mask_info):
        """Collect the beam-stop arm mask, if width and angle are set."""
        container = []
        beam_stop_arm_width = mask_info.beam_stop_arm_width
        beam_stop_arm_angle = mask_info.beam_stop_arm_angle
        # Missing positions default to 0. so the LINE entry stays well-formed.
        beam_stop_arm_pos1 = mask_info.beam_stop_arm_pos1 if mask_info.beam_stop_arm_pos1 else 0.
        beam_stop_arm_pos2 = mask_info.beam_stop_arm_pos2 if mask_info.beam_stop_arm_pos2 else 0.
        if beam_stop_arm_width and beam_stop_arm_angle:
            detail = "LINE {}, {}, {}, {}".format(beam_stop_arm_width, beam_stop_arm_angle,
                                                  beam_stop_arm_pos1, beam_stop_arm_pos2)
            container.append(masking_information(first="Arm", second="", third=detail))
        return container

    @staticmethod
    def _get_phi_mask(mask_info):
        """Collect the phi (angular sector) mask, if both limits are set."""
        container = []
        phi_min = mask_info.phi_min
        phi_max = mask_info.phi_max
        use_mask_phi_mirror = mask_info.use_mask_phi_mirror
        if phi_min and phi_max:
            if use_mask_phi_mirror:
                detail = "L/PHI {} {}".format(phi_min, phi_max)
            else:
                detail = "L/PHI/NOMIRROR{} {}".format(phi_min, phi_max)
            container.append(masking_information(first="Phi", second="", third=detail))
        return container

    @staticmethod
    def _get_mask_files(mask_info):
        """Collect one entry per user-supplied mask file."""
        container = []
        mask_files = mask_info.mask_files
        if mask_files:
            for mask_file in mask_files:
                container.append(masking_information(first="Mask file", second="", third=mask_file))
        return container

    @staticmethod
    def _get_radius(mask_info):
        """Collect the inner (beam stop) and outer (corners) radius masks."""
        container = []
        radius_min = mask_info.radius_min
        radius_max = mask_info.radius_max

        if radius_min:
            detail = "infinite-cylinder, r = {}".format(radius_min)
            container.append(masking_information(first="Beam stop", second="", third=detail))

        if radius_max:
            detail = "infinite-cylinder, r = {}".format(radius_max)
            container.append(masking_information(first="Corners", second="", third=detail))
        return container

    def _generate_masking_information(self, state):
        """Collect every mask entry from the state into a single list."""
        if state is None:
            return []
        mask_info = state.mask
        masks = []

        mask_info_lab = mask_info.detectors[DetectorType.LAB.value]
        mask_info_hab = mask_info.detectors[DetectorType.HAB.value] if DetectorType.HAB.value in mask_info.detectors else None  # noqa

        # Add the radius mask
        radius_mask = self._get_radius(mask_info)
        masks.extend(radius_mask)

        # Add the spectrum masks for LAB
        spectrum_masks_lab = self._get_spectrum_masks(mask_info_lab)
        masks.extend(spectrum_masks_lab)

        # Add the spectrum masks for HAB
        if mask_info_hab:
            spectrum_masks_hab = self._get_spectrum_masks(mask_info_hab)
            masks.extend(spectrum_masks_hab)

        # Add the general time mask
        time_masks_general = self._get_time_masks_general(mask_info)
        masks.extend(time_masks_general)

        # Add the time masks for LAB
        time_masks_lab = self._get_time_masks(mask_info_lab)
        masks.extend(time_masks_lab)

        # Add the time masks for HAB
        if mask_info_hab:
            time_masks_hab = self._get_time_masks(mask_info_hab)
            masks.extend(time_masks_hab)

        # Add arm mask
        arm_mask = self._get_arm_mask(mask_info)
        masks.extend(arm_mask)

        # Add phi mask
        phi_mask = self._get_phi_mask(mask_info)
        masks.extend(phi_mask)

        # Add mask files
        mask_files = self._get_mask_files(mask_info)
        masks.extend(mask_files)
        return masks

    def get_masking_information(self, state):
        """Return the table entries for the given state ([] for None)."""
        table_entries = []
        if state is not None:
            table_entries = self._generate_masking_information(state)
        return table_entries

    def display_masking_information(self, state):
        """Populate the view's table with the state's mask entries."""
        table_entries = self.get_masking_information(state)
        self._view.set_table(table_entries)

    @staticmethod
    def _display(masked_workspace):
        """Show the masked workspace in the instrument view, if it exists."""
        if masked_workspace and AnalysisDataService.doesExist(masked_workspace.name()):
            if PYQT4:
                instrument_win = mantidplot.getInstrumentView(masked_workspace.name())
                instrument_win.show()
            else:
                instrument_win = InstrumentViewPresenter(masked_workspace)
                instrument_win.container.show()
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog: sends crash reports (and optionally
    the zipped project-recovery data) to the Mantid error-report server."""

    SENDING_ERROR_MESSAGE = 'There was an error when sending the report.\nPlease contact [email protected] directly'

    def __init__(self, view, exit_code, application='mantidplot'):
        """
        :param view: dialog exposing set_report_callback/display_message_box/quit/show/exec_
        :param exit_code: exit code of the crashed application, forwarded to the server
        :param application: application name reported to the server
        """
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._application = application
        self._view.set_report_callback(self.error_handler)

    def do_not_share(self, continue_working=True):
        """Share nothing; only honour the user's continue/quit choice. Returns -1."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send an anonymous report and return the server's HTTP status code."""
        uptime = UsageService.getUpTime()
        status = self._send_report_to_server(share_identifiable=False, uptime=uptime)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return status

    def share_all_information(self, continue_working, name, email, text_box):
        """Send a full report; on success (HTTP 201) also upload the recovery archive.

        Returns the HTTP status code of the report submission.
        """
        uptime = UsageService.getUpTime()
        try:
            recovery_archive, file_hash = zip_recovery_directory()
        except Exception as exc:
            # Recovery data is optional - log the failure and send the report without it.
            # BUGFIX: the message previously contained a '{}' placeholder but .format was never called.
            self.error_log.information(
                "Error creating recovery archive: {}. No recovery information will be sent".format(exc))
            recovery_archive, file_hash = None, ""
        status = self._send_report_to_server(share_identifiable=True, uptime=uptime, name=name, email=email,
                                             file_hash=file_hash, text_box=text_box)
        self.error_log.notice("Sent full information")
        if status == 201 and recovery_archive:
            self._upload_recovery_file(recovery_archive=recovery_archive)
            try:
                os.remove(recovery_archive)
            except OSError as exc:
                self.error_log.information("Unable to remove zipped recovery information: {}".format(str(exc)))

        self._handle_exit(continue_working)
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch on the user's sharing choice: 0 = full, 1 = anonymous, 2 = nothing.

        Returns the server status code, -1 for "nothing shared", -2 for an unknown choice.
        """
        if share == 0:
            status = self.share_all_information(continue_working, name, email, text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(continue_working)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            self.error_log.error("Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)
            status = -2

        return status

    def _handle_exit(self, continue_working):
        """Quit the view when the user chose not to continue working."""
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _upload_recovery_file(self, recovery_archive):
        """POST the recovery archive to <errorreports.rooturl>/api/recovery."""
        url = ConfigService['errorreports.rooturl']
        url = '{}/api/recovery'.format(url)
        self.error_log.notice("Sending recovery file to address: {}".format(url))
        try:
            # Use a context manager so the archive handle is not leaked,
            # and timeout after 20 seconds to match the C++ error reporter timeout.
            with open(recovery_archive, 'rb') as file_handle:
                response = requests.post(url, files={'file': file_handle}, timeout=20)
        except Exception as e:
            self.error_log.error(
                "Failed to send recovery data. Could not establish connection to URL: {}.\n\nFull trace:\n\n{}".format(
                    url, e))
            return

        # if this is reached, the connection was successful and some response was received
        if response.status_code == 201:
            self.error_log.notice("Uploaded recovery file to server. HTTP response {}".format(response.status_code))
        elif response.status_code == 413:
            self.error_log.notice(
                "Data was too large, and was not accepted by the server. HTTP response {}".format(response.status_code))
        else:
            self.error_log.error("Failed to send recovery data. HTTP response {}".format(response.status_code))

    def _send_report_to_server(self, share_identifiable=False, name='', email='', file_hash='', uptime='', text_box=''):
        """Build an ErrorReporter, send it, and return the HTTP status code.

        A non-201 status is surfaced to the user via a message box and logged.
        """
        errorReporter = ErrorReporter(
            self._application, uptime, self._exit_code, share_identifiable, str(name), str(email), str(text_box),
            str(file_hash))
        status = errorReporter.sendErrorReport()

        if status != 201:
            self._view.display_message_box('Error contacting server', self.SENDING_ERROR_MESSAGE,
                                           'http request returned with status {}'.format(status))
            self.error_log.error("Failed to send error report http request returned status {}".format(status))

        return status

    def show_view(self):
        """Show the dialog non-modally."""
        self._view.show()

    def show_view_blocking(self):
        """Show the dialog modally."""
        self._view.exec_()
class SettingsDiagnosticPresenter(object):
    """Presenter for the settings diagnostic tab: shows the SANS state of the
    currently selected table row as a tree and can save that state to JSON."""

    class ConcreteSettingsDiagnosticTabListener(SettingsDiagnosticTab.SettingsDiagnosticTabListener):
        """Forwards view events straight to the owning presenter."""

        def __init__(self, presenter):
            super(SettingsDiagnosticPresenter.ConcreteSettingsDiagnosticTabListener, self).__init__()
            self._presenter = presenter

        def on_row_changed(self):
            self._presenter.on_row_changed()

        def on_update_rows(self):
            self._presenter.on_update_rows()

        def on_collapse(self):
            self._presenter.on_collapse()

        def on_expand(self):
            self._presenter.on_expand()

        def on_save_state_to_file(self):
            self._presenter.on_save_state()

    def __init__(self, parent_presenter):
        """
        :param parent_presenter: presenter providing get_row_indices/get_state_for_row
                                 and display_warning_box
        """
        super(SettingsDiagnosticPresenter, self).__init__()
        self._view = None
        self._parent_presenter = parent_presenter
        # Logger
        self.gui_logger = Logger("SANS GUI LOGGER")

    def on_collapse(self):
        self._view.collapse()

    def on_expand(self):
        self._view.expand()

    def on_row_changed(self):
        """Refresh the state tree for the newly selected row; warn instead of raising."""
        try:
            row_index = self._view.get_current_row()
            state = self.get_state(row_index)
            if state:
                self.display_state_diagnostic_tree(state)
        except RuntimeError as e:
            self.gui_logger.error(str(e))
            self._parent_presenter.display_warning_box('Warning', 'Unable to find files.', str(e))

    def on_update_rows(self):
        """
        Update the row selection in the combobox
        """
        current_row_index = self._view.get_current_row()
        valid_row_indices = self._parent_presenter.get_row_indices()

        # Keep the current selection if it is still valid, otherwise fall back
        # to the first valid row (or no selection at all).
        new_row_index = -1
        if current_row_index in valid_row_indices:
            new_row_index = current_row_index
        elif len(valid_row_indices) > 0:
            new_row_index = valid_row_indices[0]

        self._view.update_rows(valid_row_indices)

        if new_row_index != -1:
            self.set_row(new_row_index)
            self.on_row_changed()

    def set_row(self, index):
        self._view.set_row(index)

    def set_view(self, view):
        """Attach the view, register the event listener and apply the default GUI state."""
        if view:
            self._view = view

            # Set up row selection listener
            listener = SettingsDiagnosticPresenter.ConcreteSettingsDiagnosticTabListener(self)
            self._view.add_listener(listener)

            # Set the default gui
            self._set_default_gui()

    def _set_default_gui(self):
        self._view.update_rows([])
        self.display_state_diagnostic_tree(state=None)

    def get_state(self, index):
        """Return the SANS state for the given row, delegated to the parent presenter."""
        return self._parent_presenter.get_state_for_row(index)

    def display_state_diagnostic_tree(self, state):
        # Convert to dict before passing the state to the view
        if state is not None:
            state = state.property_manager
        self._view.set_tree(state)

    def on_save_state(self):
        """Serialize the state of the currently selected row to a JSON file."""
        # Get the save location
        save_location = self._view.get_save_location()
        # Check if it exists
        path_dir = os.path.dirname(save_location)
        if not path_dir:
            # BUGFIX: message previously read "a validate path"
            self.gui_logger.warning("The provided save location for the SANS state does not seem to exist. "
                                    "Please provide a valid path")
            return

        file_name, _ = os.path.splitext(save_location)
        full_file_path = file_name + JSON_SUFFIX

        row_index = self._view.get_current_row()
        state = self.get_state(row_index)
        if state is None:
            # Guard: an empty/invalid row has no state to serialize.
            self.gui_logger.warning("There does not seem to be a valid state for row {}.".format(row_index))
            return
        serialized_state = state.property_manager
        with open(full_file_path, 'w') as f:
            json.dump(serialized_state, f, sort_keys=True, indent=4)
        self.gui_logger.information("The state for row {} has been saved to: {} ".format(row_index, full_file_path))

        # Update the file name in the UI
        self._view.set_save_location(full_file_path)
示例#13
0
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog: sends crash reports (and optionally
    the zipped project-recovery data) to the Mantid error-report server."""

    def __init__(self, view, exit_code):
        """
        :param view: dialog exposing set_report_callback/display_message_box/quit/show
        :param exit_code: exit code of the crashed application, forwarded to the server
        """
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._view.set_report_callback(self.error_handler)

    def do_not_share(self, continue_working=True):
        """Share nothing; only honour the user's continue/quit choice. Returns -1."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send an anonymous report and return the server's HTTP status code."""
        uptime = UsageService.getUpTime()
        status = self._send_report_to_server(share_identifiable=False,
                                             uptime=uptime)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return status

    def share_all_information(self, continue_working, name, email, text_box):
        """Send a full report; on success (HTTP 201) also upload the recovery archive."""
        uptime = UsageService.getUpTime()
        try:
            recovery_archive, file_hash = zip_recovery_directory()
        except Exception as exc:
            # Recovery data is optional - log the failure and send the report without it.
            # BUGFIX: the message previously contained a '{}' placeholder but .format was never called.
            self.error_log.information(
                "Error creating recovery archive: {}. No recovery information will be sent".format(exc)
            )
            recovery_archive, file_hash = None, ""
        status = self._send_report_to_server(share_identifiable=True,
                                             uptime=uptime,
                                             name=name,
                                             email=email,
                                             file_hash=file_hash,
                                             text_box=text_box)
        self.error_log.notice("Sent full information")
        if status == 201 and recovery_archive:
            self._upload_recovery_file(recovery_archive=recovery_archive)
            try:
                os.remove(recovery_archive)
            except OSError as exc:
                self.error_log.information(
                    "Unable to remove zipped recovery information: {}".format(
                        str(exc)))

        self._handle_exit(continue_working)
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch on the user's sharing choice: 0 = full, 1 = anonymous, 2 = nothing."""
        if share == 0:
            status = self.share_all_information(continue_working, name, email,
                                                text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(continue_working)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            self.error_log.error(
                "Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)
            status = -2

        return status

    def _handle_exit(self, continue_working):
        """Quit the view when the user chose not to continue working."""
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _upload_recovery_file(self, recovery_archive):
        """POST the recovery archive to <errorreports.rooturl>/api/recovery."""
        url = ConfigService['errorreports.rooturl']
        url = '{}/api/recovery'.format(url)
        try:
            # Use a context manager so the archive handle is not leaked, and
            # time out rather than blocking the error reporter indefinitely.
            with open(recovery_archive, 'rb') as file_handle:
                response = requests.post(url, files={'file': file_handle},
                                         timeout=20)
        except Exception as e:
            # A connection failure must not crash the error reporter itself.
            self.error_log.error(
                "Failed to send recovery data. Could not establish connection "
                "to URL: {}. Error: {}".format(url, e))
            return
        if response.status_code == 201:
            self.error_log.notice(
                "Uploaded recovery file to server. HTTP response {}".format(
                    response.status_code))
        else:
            self.error_log.error(
                "Failed to send recovery data HTTP response {}".format(
                    response.status_code))

    def _send_report_to_server(self,
                               share_identifiable=False,
                               name='',
                               email='',
                               file_hash='',
                               uptime='',
                               text_box=''):
        """Build an ErrorReporter, send it, and return the HTTP status code."""
        errorReporter = ErrorReporter("mantidplot", uptime,
                                      self._exit_code, share_identifiable,
                                      str(name), str(email), str(text_box),
                                      str(file_hash))
        status = errorReporter.sendErrorReport()

        if status != 201:
            self._view.display_message_box(
                'Error contacting server',
                'There was an error when sending the report.'
                'Please contact [email protected] directly',
                'http request returned with status {}'.format(status))
            self.error_log.error(
                "Failed to send error report http request returned status {}".
                format(status))

        return status

    def show_view(self):
        """Show the dialog."""
        self._view.show()
示例#14
0
class BatchProcessRunner(QObject):
    """Runs SANS batch reductions / workspace loads on the global Qt thread pool,
    reporting per-row progress and failures back to the GUI through Qt signals.
    """

    # Emitted when a row was processed: (row index, shift factors, scale factors)
    row_processed_signal = Signal(int, list, list)
    # Emitted when a row failed: (row index, error message)
    row_failed_signal = Signal(int, str)

    def __init__(self, notify_progress, notify_done, notify_error):
        """
        :param notify_progress: slot connected to row_processed_signal
        :param notify_done: callable invoked with the worker result once the batch finishes
        :param notify_error: slot connected to row_failed_signal
        """
        super(BatchProcessRunner, self).__init__()
        self.row_processed_signal.connect(notify_progress)
        self.row_failed_signal.connect(notify_error)
        self.notify_done = notify_done
        self.batch_processor = SANSBatchReduction()
        self._logger = Logger("SANS")
        self._worker = None

    @Slot()
    def on_finished(self):
        """Slot fired when the worker finishes: hand its result to notify_done."""
        # Capture the result before dropping the worker reference.
        result = self._worker.result if self._worker else None
        self._worker = None
        self.notify_done(result)

    @Slot()
    def on_error(self):
        """Slot fired when the worker errored: just release the worker reference."""
        self._worker = None

    def process_states(self,
                       row_index_pair,
                       get_states_func,
                       use_optimizations,
                       output_mode,
                       plot_results,
                       output_graph,
                       save_can=False):
        """Start reducing the given (row, index) pairs asynchronously on the thread pool."""
        self._worker = Worker(self._process_states_on_thread,
                              row_index_pair=row_index_pair,
                              get_states_func=get_states_func,
                              use_optimizations=use_optimizations,
                              output_mode=output_mode,
                              plot_results=plot_results,
                              output_graph=output_graph,
                              save_can=save_can)
        # Connect before starting so no completion/error notification can be missed.
        self._worker.signals.finished.connect(self.on_finished)
        self._worker.signals.error.connect(self.on_error)

        QThreadPool.globalInstance().start(self._worker)

    def load_workspaces(self, row_index_pair, get_states_func):
        """Start loading the workspaces for the given rows asynchronously."""
        self._worker = Worker(self._load_workspaces_on_thread, row_index_pair,
                              get_states_func)
        self._worker.signals.finished.connect(self.on_finished)
        self._worker.signals.error.connect(self.on_error)

        QThreadPool.globalInstance().start(self._worker)

    def _process_states_on_thread(self,
                                  row_index_pair,
                                  get_states_func,
                                  use_optimizations,
                                  output_mode,
                                  plot_results,
                                  output_graph,
                                  save_can=False):
        """Worker-thread body: reduce each row, emitting processed/failed signals per row."""
        for row, index in row_index_pair:

            # TODO update the get_states_func to support one per call
            try:
                states, errors = get_states_func(row_entries=[row])
            except Exception as e:
                self._handle_err(index, e)
                continue

            # A single row must yield exactly one state or one error.
            assert len(states) + len(errors) == 1, \
                "Expected 1 error to return got {0}".format(len(states) + len(errors))

            for error in errors.values():
                self.row_failed_signal.emit(index, error)

            for state in states.values():
                try:
                    out_scale_factors, out_shift_factors = \
                        self.batch_processor([state], use_optimizations, output_mode, plot_results, output_graph, save_can)
                except Exception as e:
                    self._handle_err(index, e)
                    continue

                # Scale/shift factors are only meaningful for merged reductions;
                # other modes report empty lists.
                if state.reduction.reduction_mode == ReductionMode.MERGED:
                    out_shift_factors = out_shift_factors[0]
                    out_scale_factors = out_scale_factors[0]
                else:
                    out_shift_factors = []
                    out_scale_factors = []
                self.row_processed_signal.emit(index, out_shift_factors,
                                               out_scale_factors)

    def _load_workspaces_on_thread(self, row_index_pair, get_states_func):
        """Worker-thread body: load the workspaces for each row, emitting per-row signals."""
        for row, index in row_index_pair:
            try:
                states, errors = get_states_func(row_entries=[row])
            except Exception as e:
                self._handle_err(index, e)
                continue

            for error in errors.values():
                self.row_failed_signal.emit(index, error)

            for state in states.values():
                try:
                    load_workspaces_from_states(state)
                    self.row_processed_signal.emit(index, [], [])
                except Exception as e:
                    self._handle_err(index, e)
                    continue

    def _handle_err(self, index, e):
        """Log the exception (with traceback) and emit a failure for the row."""
        # We manually have to extract out the traceback, since going to a str for Qt signals will strip this
        self._logger.error(''.join(traceback.format_tb(e.__traceback__)))
        self._logger.error(str(e))
        self.row_failed_signal.emit(index, str(e))
示例#15
0
class ErrorReporterPresenter(object):
    """Presenter for the error-report dialog: sends crash reports (and optionally
    the zipped project-recovery data) to the Mantid error-report server."""

    SENDING_ERROR_MESSAGE = 'There was an error when sending the report.\nPlease contact [email protected] directly'

    def __init__(self, view, exit_code, application='mantidplot'):
        """
        :param view: dialog exposing set_report_callback/display_message_box/quit/show/exec_
        :param exit_code: exit code of the crashed application, forwarded to the server
        :param application: application name reported to the server
        """
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._application = application
        self._view.set_report_callback(self.error_handler)

    def do_not_share(self, continue_working=True):
        """Share nothing; only honour the user's continue/quit choice. Returns -1."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send an anonymous report and return the server's HTTP status code."""
        uptime = UsageService.getUpTime()
        status = self._send_report_to_server(share_identifiable=False,
                                             uptime=uptime)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return status

    def share_all_information(self, continue_working, name, email, text_box):
        """Send a full report; on success (HTTP 201) also upload the recovery archive."""
        uptime = UsageService.getUpTime()
        try:
            recovery_archive, file_hash = zip_recovery_directory()
        except Exception as exc:
            # Recovery data is optional - log the failure and send the report without it.
            # BUGFIX: the message previously contained a '{}' placeholder but .format was never called.
            self.error_log.information(
                "Error creating recovery archive: {}. No recovery information will be sent".format(exc)
            )
            recovery_archive, file_hash = None, ""
        status = self._send_report_to_server(share_identifiable=True,
                                             uptime=uptime,
                                             name=name,
                                             email=email,
                                             file_hash=file_hash,
                                             text_box=text_box)
        self.error_log.notice("Sent full information")
        if status == 201 and recovery_archive:
            self._upload_recovery_file(recovery_archive=recovery_archive)
            try:
                os.remove(recovery_archive)
            except OSError as exc:
                self.error_log.information(
                    "Unable to remove zipped recovery information: {}".format(
                        str(exc)))

        self._handle_exit(continue_working)
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch on the user's sharing choice: 0 = full, 1 = anonymous, 2 = nothing."""
        if share == 0:
            status = self.share_all_information(continue_working, name, email,
                                                text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(continue_working)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            self.error_log.error(
                "Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)
            status = -2

        return status

    def _handle_exit(self, continue_working):
        """Quit the view when the user chose not to continue working."""
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _upload_recovery_file(self, recovery_archive):
        """POST the recovery archive to <errorreports.rooturl>/api/recovery."""
        url = ConfigService['errorreports.rooturl']
        url = '{}/api/recovery'.format(url)
        self.error_log.notice(
            "Sending recovery file to address: {}".format(url))
        try:
            # Use a context manager so the archive handle is not leaked;
            # timeout after 20 seconds to match the C++ error reporter timeout
            with open(recovery_archive, 'rb') as file_handle:
                response = requests.post(url, files={'file': file_handle},
                                         timeout=20)
        except Exception as e:
            self.error_log.error(
                "Failed to send recovery data. Could not establish connection to URL: {}.\n\nFull trace:\n\n{}"
                .format(url, e))
            return

        # if this is reached, the connection was successful and some response was received
        if response.status_code == 201:
            self.error_log.notice(
                "Uploaded recovery file to server. HTTP response {}".format(
                    response.status_code))
        elif response.status_code == 413:
            self.error_log.notice(
                "Data was too large, and was not accepted by the server. HTTP response {}"
                .format(response.status_code))
        else:
            self.error_log.error(
                "Failed to send recovery data. HTTP response {}".format(
                    response.status_code))

    def _send_report_to_server(self,
                               share_identifiable=False,
                               name='',
                               email='',
                               file_hash='',
                               uptime='',
                               text_box=''):
        """Build an ErrorReporter, send it, and return the HTTP status code."""
        errorReporter = ErrorReporter(self._application, uptime,
                                      self._exit_code, share_identifiable,
                                      str(name), str(email), str(text_box),
                                      str(file_hash))
        status = errorReporter.sendErrorReport()

        if status != 201:
            self._view.display_message_box(
                'Error contacting server', self.SENDING_ERROR_MESSAGE,
                'http request returned with status {}'.format(status))
            self.error_log.error(
                "Failed to send error report http request returned status {}".
                format(status))

        return status

    def show_view(self):
        """Show the dialog non-modally."""
        self._view.show()

    def show_view_blocking(self):
        """Show the dialog modally."""
        self._view.exec_()
示例#16
0
class RunTabPresenter(object):
    class ConcreteRunTabListener(SANSDataProcessorGui.RunTabListener):
        """Listener registered with the view; forwards each GUI event to the presenter."""

        def __init__(self, presenter):
            super(RunTabPresenter.ConcreteRunTabListener, self).__init__()
            self._presenter = presenter

        def on_user_file_load(self):
            self._presenter.on_user_file_load()

        def on_mask_file_add(self):
            self._presenter.on_mask_file_add()

        def on_batch_file_load(self):
            self._presenter.on_batch_file_load()

        def on_processed_clicked(self):
            self._presenter.on_processed_clicked()

        def on_processing_finished(self):
            self._presenter.on_processing_finished()

        def on_data_changed(self):
            self._presenter.on_data_changed()

        def on_manage_directories(self):
            self._presenter.on_manage_directories()

    def __init__(self, facility, view=None):
        """
        :param facility: the SANS facility the GUI is being run for
        :param view: optional SANSDataProcessorGui; may also be attached later via set_view
        """
        super(RunTabPresenter, self).__init__()
        self._facility = facility

        # Logger
        self.sans_logger = Logger("SANS")

        # Presenter needs to have a handle on the view since it delegates it
        self._view = None
        self.set_view(view)

        # Models that are being used by the presenter
        self._state_model = None
        self._table_model = None

        # Due to the nature of the DataProcessorWidget we need to provide an algorithm with at least one input
        # workspace and at least one output workspace. Our SANS state approach is not compatible with this. Hence
        # we provide a dummy workspace which is not used. We keep it invisible on the ADS and delete it when the
        # main_presenter is deleted.
        # This is not a nice solution but in line with the SANS dummy algorithm approach that we have provided
        # for the
        self._create_dummy_input_workspace()

        # File information for the first input
        self._file_information = None

        # Settings diagnostic tab presenter
        self._settings_diagnostic_tab_presenter = SettingsDiagnosticPresenter(
            self)

        # Masking table presenter
        self._masking_table_presenter = MaskingTablePresenter(self)

        # Beam centre presenter
        self._beam_centre_presenter = BeamCentrePresenter(self)

    def __del__(self):
        # Clean up the hidden dummy input workspace when the presenter goes away.
        self._delete_dummy_input_workspace()

    def _default_gui_setup(self):
        """
        Provides a default setup of the GUI. This is important for the initial start up, when the view is being set.
        """
        # Reduction modes
        self._view.set_reduction_modes(get_reduction_mode_strings_for_gui())

        # Lin/Log step types are shared by the wavelength and the q controls
        step_types = [
            RangeStepType.to_string(RangeStepType.Lin),
            RangeStepType.to_string(RangeStepType.Log)
        ]
        self._view.wavelength_step_type = step_types

        # Geometry options, including the option to read the sample shape from file
        self._view.sample_shape = [
            "Read from file",
            SampleShape.to_string(SampleShape.CylinderAxisUp),
            SampleShape.to_string(SampleShape.Cuboid),
            SampleShape.to_string(SampleShape.CylinderAxisAlong)
        ]

        # q ranges reuse the same Lin/Log step types
        self._view.q_1d_step_type = step_types
        self._view.q_xy_step_type = step_types

        # Transmission fit options
        fit_options = [
            FitType.to_string(FitType.Linear),
            FitType.to_string(FitType.Logarithmic),
            FitType.to_string(FitType.Polynomial)
        ]
        self._view.transmission_sample_fit_type = fit_options
        self._view.transmission_can_fit_type = fit_options

    # ------------------------------------------------------------------------------------------------------------------
    # Table + Actions
    # ------------------------------------------------------------------------------------------------------------------
    def set_view(self, view):
        """
        Sets the view
        :param view: the view is the SANSDataProcessorGui. The presenter needs to access some of the API
        """
        if view is None:
            return
        self._view = view

        # Register the listener which routes GUI events back to this presenter
        self._view.add_listener(RunTabPresenter.ConcreteRunTabListener(self))

        # Apply the default GUI state
        self._default_gui_setup()

        # Hand the relevant sub-views to the child presenters
        self._settings_diagnostic_tab_presenter.set_view(
            self._view.settings_diagnostic_tab)
        self._masking_table_presenter.set_view(self._view.masking_table)
        self._beam_centre_presenter.set_view(self._view.beam_centre)

    def on_user_file_load(self):
        """
        Loads the user file. Populates the models and the view.

        Any failure is logged rather than raised, so a bad user file cannot
        bring down the GUI.
        """
        try:
            # 1. Get the user file path from the view
            user_file_path = self._view.get_user_file_path()

            if not user_file_path:
                return
            # 2. Get the full file path
            user_file_path = FileFinder.getFullPath(user_file_path)
            if not os.path.exists(user_file_path):
                raise RuntimeError(
                    "The user path {} does not exist. Make sure a valid user file path"
                    " has been specified.".format(user_file_path))

            # Clear out the current view
            self._view.reset_all_fields_to_default()

            # 3. Read and parse the user file
            user_file_reader = UserFileReader(user_file_path)
            user_file_items = user_file_reader.read_user_file()

            # 4. Populate the model
            self._state_model = StateGuiModel(user_file_items)
            # 5. Update the views.
            self._update_view_from_state_model()

            # 6. Perform calls on child presenters
            self._masking_table_presenter.on_update_rows()
            self._settings_diagnostic_tab_presenter.on_update_rows()
            self._beam_centre_presenter.on_update_rows()

        except Exception as e:
            # Broad catch is deliberate: a load failure is reported, never raised.
            self.sans_logger.error(
                "Loading of the user file failed. Ensure that the path to your files has been added "
                "to the Mantid search directories! See here for more details: {}"
                .format(str(e)))

    def on_batch_file_load(self):
        """
        Loads a batch file and populates the batch table based on that.
        """
        try:
            # 1. Get the batch file from the view
            batch_file_path = self._view.get_batch_file_path()

            if not batch_file_path:
                return

            if not os.path.exists(batch_file_path):
                raise RuntimeError(
                    "The batch file path {} does not exist. Make sure a valid batch file path"
                    " has been specified.".format(batch_file_path))

            # 2. Read the batch file
            batch_file_parser = BatchCsvParser(batch_file_path)
            parsed_rows = batch_file_parser.parse_batch_file()
            # 3. Clear the table
            self._view.clear_table()

            # 4. Populate the table
            for row in parsed_rows:
                self._populate_row_in_table(row)

            # 5. Populate the selected instrument and the correct detector selection
            self._setup_instrument_specific_settings()

            # 6. Perform calls on child presenters
            self._masking_table_presenter.on_update_rows()
            self._settings_diagnostic_tab_presenter.on_update_rows()
            self._beam_centre_presenter.on_update_rows()

        # NOTE(review): only RuntimeError is caught here, whereas on_user_file_load
        # catches Exception - confirm the narrower catch is intentional.
        except RuntimeError as e:
            self.sans_logger.error(
                "Loading of the batch file failed. Ensure that the path to your files has been added"
                " to the Mantid search directories! See here for more details: {}"
                .format(str(e)))

    def on_data_changed(self):
        """Refresh instrument-specific settings and notify the child presenters."""
        # The detector selection depends on the currently selected instrument.
        self._setup_instrument_specific_settings()

        # Propagate the change to every sub-presenter that mirrors the table rows.
        for presenter in (self._masking_table_presenter,
                          self._settings_diagnostic_tab_presenter,
                          self._beam_centre_presenter):
            presenter.on_update_rows()

    def on_processed_clicked(self):
        """
        Prepares the batch reduction.

        0. Validate rows and create dummy workspace if it does not exist
        1. Sets up the states
        2. Adds a dummy input workspace
        3. Adds row index information
        """
        try:
            self.sans_logger.information("Starting processing of batch table.")

            # Step 0: make sure a dummy workspace exists and every row is valid.
            self._create_dummy_input_workspace()
            self._validate_rows()

            # Step 1: build the states and hand them to the property manager service.
            states = self.get_states()
            if not states:
                raise RuntimeError(
                    "There seems to have been an issue with setting the states. Make sure that a user file"
                    " has been loaded")
            property_manager_service = PropertyManagerService()
            property_manager_service.add_states_to_pmds(states)

            # Step 2: place the dummy input workspace into the Options column.
            self._remove_dummy_workspaces_and_row_index()
            self._set_dummy_workspace()

            # Step 3: place the dummy row index into the Options column.
            self._set_indices()
        except Exception as exception:
            # Stop the processing flow and surface the problem in the log.
            self._view.halt_process_flag()
            self.sans_logger.error("Process halted due to: {}".format(str(exception)))

    def on_processing_finished(self):
        """Clean up the dummy workspace and row-index helper entries after a run."""
        self._remove_dummy_workspaces_and_row_index()

    def on_manage_directories(self):
        """Open the view's directory-manager dialog."""
        self._view.show_directory_manager()

    def on_mask_file_add(self):
        """
        We get the added mask file name and add it to the list of masks
        """
        new_mask_file = self._view.get_mask_file()
        if not new_mask_file:
            return
        # The file must be resolvable via the Mantid search directories, but the
        # state model stores the name exactly as it was entered.
        if not FileFinder.getFullPath(new_mask_file):
            return

        # Record the new mask file on the state model.
        current_masks = self._state_model.mask_files
        current_masks.append(new_mask_file)
        self._state_model.mask_files = current_masks

        # Keep the sub-presenters in sync with the updated state model.
        for presenter in (self._masking_table_presenter,
                          self._settings_diagnostic_tab_presenter,
                          self._beam_centre_presenter):
            presenter.on_update_rows()

    def _add_to_hidden_options(self, row, property_name, property_value):
        """
        Adds a new property to the Hidden Options column

        @param row: The row where the Options column is being altered
        @param property_name: The property name on the GUI algorithm.
        @param property_value: The value which is being set for the property.
        """
        entry = property_name + OPTIONS_EQUAL + str(property_value)
        existing = self._get_hidden_options(row)
        # Append with a separator when the cell already holds options.
        updated = existing + OPTIONS_SEPARATOR + entry if existing else entry
        self._set_hidden_options(updated, row)

    def _set_hidden_options(self, value, row):
        """Write *value* into the hidden Options cell of *row* on the view."""
        self._view.set_cell(value, row, HIDDEN_OPTIONS_INDEX)

    def _get_options(self, row):
        """Return the contents of the visible Options cell of *row* as a string."""
        return self._view.get_cell(row, OPTIONS_INDEX, convert_to=str)

    def _get_hidden_options(self, row):
        """Return the contents of the hidden Options cell of *row* as a string."""
        return self._view.get_cell(row, HIDDEN_OPTIONS_INDEX, convert_to=str)

    def is_empty_row(self, row):
        """
        Checks if a row has no entries. These rows will be ignored.
        :param row: the row index
        :return: True if the row is empty.
        """
        # A row counts as empty when every data cell up to and including the
        # Options column holds a falsy value; any() short-circuits like the
        # original early return did.
        return not any(
            self._view.get_cell(row, index, convert_to=str)
            for index in range(OPTIONS_INDEX + 1))

    def _remove_from_hidden_options(self, row, property_name):
        """
        Remove the entries in the hidden options column
        :param row: the row index
        :param property_name: the property name which is to be removed
        """
        options = self._get_hidden_options(row)
        # Remove the property entry and the value
        individual_options = options.split(",")
        clean_options = []
        for individual_option in individual_options:
            if property_name not in individual_option:
                clean_options.append(individual_option)
        clean_options = ",".join(clean_options)
        self._set_hidden_options(clean_options, row)

    def _validate_rows(self):
        """
        Validation of the rows. A minimal setup requires that ScatterSample is set.
        """
        # If SampleScatter is empty, then don't run the reduction.
        # We allow empty rows for now, since we cannot remove them from Python.
        number_of_rows = self._view.get_number_of_rows()
        for row in range(number_of_rows):
            if not self.is_empty_row(row):
                sample_scatter = self._view.get_cell(row, 0)
                if not sample_scatter:
                    raise RuntimeError(
                        "Row {} has not SampleScatter specified. Please correct this."
                        .format(row))

    def get_processing_options(self):
        """
        Creates a processing string for the data processor widget

        :return: A processing string for the data processor widget
        """
        # Whether the reduction may reuse cached intermediate workspaces.
        use_optimizations = "UseOptimizations=1" if self._view.use_optimizations else "UseOptimizations=0"

        # Where the reduced output is sent.
        output_mode = "OutputMode=" + OutputMode.to_string(self._view.output_mode)

        return ",".join([use_optimizations, output_mode])

    # ------------------------------------------------------------------------------------------------------------------
    # Controls
    # ------------------------------------------------------------------------------------------------------------------
    def disable_controls(self):
        """
        Disable all input fields and buttons during the execution of the reduction.
        """
        # TODO: think about enabling and disable some controls during reduction
        # NOTE(review): currently a no-op; the view stays fully enabled while processing.
        pass

    def enable_controls(self):
        """
        Enable all input fields and buttons after the execution has completed.
        """
        # TODO: think about enabling and disable some controls during reduction
        # NOTE(review): currently a no-op; see disable_controls.
        pass

    # ------------------------------------------------------------------------------------------------------------------
    # Table Model and state population
    # ------------------------------------------------------------------------------------------------------------------
    def get_states(self, row_index=None):
        """
        Gathers the state information for all rows.
        :param row_index: if a single row is selected, then only this row is returned, else all the state for all
                             rows is returned
        :return: a list of states
        """
        generation_start = time.time()

        # Pull the latest view settings into a fresh state model and table model.
        state_model = self._get_state_model_with_view_update()
        table_model = self._get_table_model()

        # Only build states when both models are available.
        states = None
        if table_model and state_model:
            states = self._create_states(state_model, table_model, row_index)

        elapsed = time.time() - generation_start
        self.sans_logger.information(
            "The generation of all states took {}s".format(elapsed))
        return states

    def get_row_indices(self):
        """
        Gets the indices of row which are not empty.
        :return: a list of row indices.
        """
        row_count = self._view.get_number_of_rows()
        return [row for row in range(row_count) if not self.is_empty_row(row)]

    def get_state_for_row(self, row_index):
        """
        Creates the state for a particular row.
        :param row_index: the row index
        :return: a state if the index is valid and there is a state else None
        """
        states = self.get_states(row_index=row_index)
        if states is None:
            self.sans_logger.warning(
                "There does not seem to be data for a row {}.".format(
                    row_index))
            return None
        # A plain dict lookup replaces the previous redundant
        # `row_index in list(states.keys())` plus nested truthiness check.
        return states.get(row_index)

    def _update_view_from_state_model(self):
        # Front tab view
        self._set_on_view("zero_error_free")
        self._set_on_view("save_types")
        self._set_on_view("compatibility_mode")

        self._set_on_view("merge_scale")
        self._set_on_view("merge_shift")
        self._set_on_view("merge_scale_fit")
        self._set_on_view("merge_shift_fit")
        self._set_on_view("merge_q_range_start")
        self._set_on_view("merge_q_range_stop")

        # Settings tab view
        self._set_on_view("reduction_dimensionality")
        self._set_on_view("reduction_mode")
        self._set_on_view("event_slices")
        self._set_on_view("event_binning")
        self._set_on_view("merge_mask")

        self._set_on_view("wavelength_step_type")
        self._set_on_view("wavelength_min")
        self._set_on_view("wavelength_max")
        self._set_on_view("wavelength_step")

        self._set_on_view("absolute_scale")
        self._set_on_view("sample_shape")
        self._set_on_view("sample_height")
        self._set_on_view("sample_width")
        self._set_on_view("sample_thickness")
        self._set_on_view("z_offset")

        # Adjustment tab
        self._set_on_view("normalization_incident_monitor")
        self._set_on_view("normalization_interpolate")

        self._set_on_view("transmission_incident_monitor")
        self._set_on_view("transmission_interpolate")
        self._set_on_view("transmission_roi_files")
        self._set_on_view("transmission_mask_files")
        self._set_on_view("transmission_radius")
        self._set_on_view("transmission_monitor")
        self._set_on_view("transmission_mn_shift")
        self._set_on_view("show_transmission")

        self._set_on_view_transmission_fit()

        self._set_on_view("pixel_adjustment_det_1")
        self._set_on_view("pixel_adjustment_det_2")
        self._set_on_view("wavelength_adjustment_det_1")
        self._set_on_view("wavelength_adjustment_det_2")

        # Q tab
        self._set_on_view_q_rebin_string()
        self._set_on_view("q_xy_max")
        self._set_on_view("q_xy_step")
        self._set_on_view("q_xy_step_type")

        self._set_on_view("gravity_on_off")
        self._set_on_view("gravity_extra_length")

        self._set_on_view("use_q_resolution")
        self._set_on_view_q_resolution_aperture()
        self._set_on_view("q_resolution_delta_r")
        self._set_on_view("q_resolution_collimation_length")
        self._set_on_view("q_resolution_moderator_file")

        # Mask
        self._set_on_view("phi_limit_min")
        self._set_on_view("phi_limit_max")
        self._set_on_view("phi_limit_use_mirror")
        self._set_on_view("radius_limit_min")
        self._set_on_view("radius_limit_max")

        # Beam Centre
        self._beam_centre_presenter.set_on_view('lab_pos_1', self._state_model)
        self._beam_centre_presenter.set_on_view('lab_pos_2', self._state_model)
        self._beam_centre_presenter.set_on_view('hab_pos_1', self._state_model)
        self._beam_centre_presenter.set_on_view('hab_pos_2', self._state_model)

    def _set_on_view_transmission_fit_sample_settings(self):
        """Mirror the sample transmission-fit settings of the model onto the view."""
        fit_type = self._state_model.transmission_sample_fit_type

        # A configured fit type other than NoFit means fitting is enabled.
        self._view.transmission_sample_use_fit = fit_type is not FitType.NoFit

        # The polynomial order only matters for polynomial fits; default to 2 otherwise.
        self._view.transmission_sample_polynomial_order = (
            self._state_model.transmission_sample_polynomial_order
            if fit_type is FitType.Polynomial else 2)

        # Show a concrete fit type in the selector even when fitting is disabled.
        self._view.transmission_sample_fit_type = (
            fit_type if fit_type is not FitType.NoFit else FitType.Linear)

        # Apply the wavelength range only when both limits are set.
        wavelength_min = self._state_model.transmission_sample_wavelength_min
        wavelength_max = self._state_model.transmission_sample_wavelength_max
        if wavelength_min and wavelength_max:
            self._view.transmission_sample_use_wavelength = True
            self._view.transmission_sample_wavelength_min = wavelength_min
            self._view.transmission_sample_wavelength_max = wavelength_max

    def _set_on_view_transmission_fit(self):
        """
        Mirror the transmission-fit settings of the model onto the view.

        1. Check if individual settings exist for sample and can. If so then set the
           view to separate, else apply the shared (sample) settings to both.
        2. Apply the settings.
        """
        separate_settings = self._state_model.has_transmission_fit_got_separate_settings_for_sample_and_can(
        )
        self._view.set_fit_selection(use_separate=separate_settings)

        if separate_settings:
            self._set_on_view_transmission_fit_sample_settings()

            # Set transmission_can_use_fit.
            # BUGFIX: the fit type is a model attribute (not a callable), and fitting
            # is enabled when the type is anything *other* than NoFit — the previous
            # `is FitType.NoFit` check was inverted.
            fit_type_can = self._state_model.transmission_can_fit_type
            use_can_fit = fit_type_can is not FitType.NoFit
            self._view.transmission_can_use_fit = use_can_fit

            # Set the polynomial order for can
            polynomial_order_can = self._state_model.transmission_can_polynomial_order if fit_type_can is FitType.Polynomial else 2  # noqa
            self._view.transmission_can_polynomial_order = polynomial_order_can

            # Set the fit type for the can.
            # BUGFIX: the fit type must be written to the view (as for the sample),
            # not onto the presenter itself.
            fit_type_can = fit_type_can if fit_type_can is not FitType.NoFit else FitType.Linear
            self._view.transmission_can_fit_type = fit_type_can

            # Set the wavelength
            wavelength_min = self._state_model.transmission_can_wavelength_min
            wavelength_max = self._state_model.transmission_can_wavelength_max
            if wavelength_min and wavelength_max:
                self._view.transmission_can_use_wavelength = True
                self._view.transmission_can_wavelength_min = wavelength_min
                self._view.transmission_can_wavelength_max = wavelength_max
        else:
            self._set_on_view_transmission_fit_sample_settings()

    def _set_on_view_q_resolution_aperture(self):
        self._set_on_view("q_resolution_source_a")
        self._set_on_view("q_resolution_sample_a")
        self._set_on_view("q_resolution_source_h")
        self._set_on_view("q_resolution_sample_h")
        self._set_on_view("q_resolution_source_w")
        self._set_on_view("q_resolution_sample_w")

        # If we have h1, h2, w1, and w2 selected then we want to select the rectangular aperture.
        is_rectangular = self._state_model.q_resolution_source_h and self._state_model.q_resolution_sample_h and \
                         self._state_model.q_resolution_source_w and self._state_model.q_resolution_sample_w  # noqa
        self._view.set_q_resolution_shape_to_rectangular(is_rectangular)

    def _set_on_view_q_rebin_string(self):
        """
        Maps the q_1d_rebin_string of the model to the q_1d_step and q_1d_step_type property of the view.
        """
        rebin_string = self._state_model.q_1d_rebin_string
        elements = rebin_string.split(",")

        if len(elements) == 3:
            # A "min,step,max" string maps onto the individual view fields;
            # a negative step encodes logarithmic binning.
            step_value = float(elements[1])
            self._view.q_1d_min_or_rebin_string = float(elements[0])
            self._view.q_1d_max = float(elements[2])
            self._view.q_1d_step = abs(step_value)
            self._view.q_1d_step_type = (RangeStepType.Lin if step_value >= 0
                                         else RangeStepType.Log)
        else:
            # Anything else is treated as a raw rebin string with variable binning.
            self._view.q_1d_min_or_rebin_string = rebin_string
            self._view.q_1d_step_type = self._view.VARIABLE

    def _set_on_view(self, attribute_name):
        attribute = getattr(self._state_model, attribute_name)
        if attribute or isinstance(
                attribute, bool
        ):  # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
            setattr(self._view, attribute_name, attribute)

    def _set_on_view_with_view(self, attribute_name, view):
        attribute = getattr(self._state_model, attribute_name)
        if attribute or isinstance(
                attribute, bool
        ):  # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
            setattr(view, attribute_name, attribute)

    def _get_state_model_with_view_update(self):
        """
        Goes through all sub presenters and update the state model based on the views.

        Note that at the moment we have set up the view and the model such that the name of a property must be the same
        in the view and the model. This can be easily changed, but it also provides a good cohesion.
        """
        state_model = copy.deepcopy(self._state_model)

        # If we don't have a state model then return None
        if state_model is None:
            return state_model
        # Run tab view
        self._set_on_state_model("zero_error_free", state_model)
        self._set_on_state_model("save_types", state_model)
        self._set_on_state_model("compatibility_mode", state_model)
        self._set_on_state_model("merge_scale", state_model)
        self._set_on_state_model("merge_shift", state_model)
        self._set_on_state_model("merge_scale_fit", state_model)
        self._set_on_state_model("merge_shift_fit", state_model)
        self._set_on_state_model("merge_q_range_start", state_model)
        self._set_on_state_model("merge_q_range_stop", state_model)
        self._set_on_state_model("merge_mask", state_model)
        self._set_on_state_model("merge_max", state_model)
        self._set_on_state_model("merge_min", state_model)

        # Settings tab
        self._set_on_state_model("reduction_dimensionality", state_model)
        self._set_on_state_model("reduction_mode", state_model)
        self._set_on_state_model("event_slices", state_model)
        self._set_on_state_model("event_binning", state_model)

        self._set_on_state_model("wavelength_step_type", state_model)
        self._set_on_state_model("wavelength_min", state_model)
        self._set_on_state_model("wavelength_max", state_model)
        self._set_on_state_model("wavelength_step", state_model)

        self._set_on_state_model("absolute_scale", state_model)
        self._set_on_state_model("sample_shape", state_model)
        self._set_on_state_model("sample_height", state_model)
        self._set_on_state_model("sample_width", state_model)
        self._set_on_state_model("sample_thickness", state_model)
        self._set_on_state_model("z_offset", state_model)

        # Adjustment tab
        self._set_on_state_model("normalization_incident_monitor", state_model)
        self._set_on_state_model("normalization_interpolate", state_model)

        self._set_on_state_model("transmission_incident_monitor", state_model)
        self._set_on_state_model("transmission_interpolate", state_model)
        self._set_on_state_model("transmission_roi_files", state_model)
        self._set_on_state_model("transmission_mask_files", state_model)
        self._set_on_state_model("transmission_radius", state_model)
        self._set_on_state_model("transmission_monitor", state_model)
        self._set_on_state_model("transmission_mn_shift", state_model)
        self._set_on_state_model("show_transmission", state_model)

        self._set_on_state_model_transmission_fit(state_model)

        self._set_on_state_model("pixel_adjustment_det_1", state_model)
        self._set_on_state_model("pixel_adjustment_det_2", state_model)
        self._set_on_state_model("wavelength_adjustment_det_1", state_model)
        self._set_on_state_model("wavelength_adjustment_det_2", state_model)

        # Q tab
        self._set_on_state_model_q_1d_rebin_string(state_model)
        self._set_on_state_model("q_xy_max", state_model)
        self._set_on_state_model("q_xy_step", state_model)
        self._set_on_state_model("q_xy_step_type", state_model)

        self._set_on_state_model("gravity_on_off", state_model)
        self._set_on_state_model("gravity_extra_length", state_model)

        self._set_on_state_model("use_q_resolution", state_model)
        self._set_on_state_model("q_resolution_source_a", state_model)
        self._set_on_state_model("q_resolution_sample_a", state_model)
        self._set_on_state_model("q_resolution_source_h", state_model)
        self._set_on_state_model("q_resolution_sample_h", state_model)
        self._set_on_state_model("q_resolution_source_w", state_model)
        self._set_on_state_model("q_resolution_sample_w", state_model)
        self._set_on_state_model("q_resolution_delta_r", state_model)
        self._set_on_state_model("q_resolution_collimation_length",
                                 state_model)
        self._set_on_state_model("q_resolution_moderator_file", state_model)

        # Mask
        self._set_on_state_model("phi_limit_min", state_model)
        self._set_on_state_model("phi_limit_max", state_model)
        self._set_on_state_model("phi_limit_use_mirror", state_model)
        self._set_on_state_model("radius_limit_min", state_model)
        self._set_on_state_model("radius_limit_max", state_model)

        # Beam Centre
        self._beam_centre_presenter.set_on_state_model("lab_pos_1",
                                                       state_model)
        self._beam_centre_presenter.set_on_state_model("lab_pos_2",
                                                       state_model)

        return state_model

    def _set_on_state_model_transmission_fit(self, state_model):
        """Write the view's transmission-fit settings into the state model.

        When the view uses one shared setting, the sample controls drive both
        sample and can; otherwise each is read from its own set of controls.
        """
        if self._view.use_same_transmission_fit_setting_for_sample_and_can():
            # Shared settings: sample values are applied to sample AND can.
            use_fit = self._view.transmission_sample_use_fit
            fit_type = self._view.transmission_sample_fit_type
            order = self._view.transmission_sample_polynomial_order
            effective_fit = fit_type if use_fit else FitType.NoFit
            state_model.transmission_sample_fit_type = effective_fit
            state_model.transmission_can_fit_type = effective_fit
            state_model.transmission_sample_polynomial_order = order
            state_model.transmission_can_polynomial_order = order

            # Wavelength settings are shared as well.
            if self._view.transmission_sample_use_wavelength:
                low = self._view.transmission_sample_wavelength_min
                high = self._view.transmission_sample_wavelength_max
                state_model.transmission_sample_wavelength_min = low
                state_model.transmission_sample_wavelength_max = high
                state_model.transmission_can_wavelength_min = low
                state_model.transmission_can_wavelength_max = high
        else:
            # Sample settings
            use_fit_sample = self._view.transmission_sample_use_fit
            fit_type_sample = self._view.transmission_sample_fit_type
            state_model.transmission_sample_fit_type = fit_type_sample if use_fit_sample else FitType.NoFit
            state_model.transmission_sample_polynomial_order = self._view.transmission_sample_polynomial_order

            # Sample wavelength settings
            if self._view.transmission_sample_use_wavelength:
                state_model.transmission_sample_wavelength_min = self._view.transmission_sample_wavelength_min
                state_model.transmission_sample_wavelength_max = self._view.transmission_sample_wavelength_max

            # Can settings
            use_fit_can = self._view.transmission_can_use_fit
            fit_type_can = self._view.transmission_can_fit_type
            state_model.transmission_can_fit_type = fit_type_can if use_fit_can else FitType.NoFit
            state_model.transmission_can_polynomial_order = self._view.transmission_can_polynomial_order

            # Can wavelength settings
            if self._view.transmission_can_use_wavelength:
                state_model.transmission_can_wavelength_min = self._view.transmission_can_wavelength_min
                state_model.transmission_can_wavelength_max = self._view.transmission_can_wavelength_max

    def _set_on_state_model_q_1d_rebin_string(self, state_model):
        """Translate the view's q-1D binning controls into a rebin string on the model."""
        step_type = self._view.q_1d_step_type

        if step_type is None:
            # Variable binning: the min field already holds a full rebin string.
            state_model.q_1d_rebin_string = self._view.q_1d_min_or_rebin_string
            return

        q_min = self._view.q_1d_min_or_rebin_string
        q_max = self._view.q_1d_max
        q_step = self._view.q_1d_step
        if q_min and q_max and q_step and step_type:
            # A negative step encodes logarithmic binning.
            sign = -1. if step_type is RangeStepType.Log else 1.
            state_model.q_1d_rebin_string = (
                str(q_min) + "," + str(sign * q_step) + "," + str(q_max))

    def _set_on_state_model(self, attribute_name, state_model):
        attribute = getattr(self._view, attribute_name)
        if attribute or isinstance(attribute, bool):
            setattr(state_model, attribute_name, attribute)

    def _get_table_model(self):
        """Build a TableModel from the current view contents.

        :return: a TableModel holding the user/batch file paths and one
                 TableIndexModel entry per table row.
        """
        # 1. Create a new table model
        user_file = self._view.get_user_file_path()
        batch_file = self._view.get_batch_file_path()

        table_model = TableModel()
        table_model.user_file = user_file
        # BUGFIX: the batch file belongs on the table model; previously it was
        # accidentally stored on the presenter (self.batch_file = batch_file).
        table_model.batch_file = batch_file

        # 2. Iterate over each row, create a table row model and insert it
        number_of_rows = self._view.get_number_of_rows()
        for row in range(number_of_rows):
            def _cell(column):
                # Read a single cell of the current row as a string.
                return self._view.get_cell(row=row, column=column, convert_to=str)

            # Get the options string
            # We don't have to add the hidden column here, since it only contains information for the SANS
            # workflow to operate properly. It however does not contain information for the
            options_string = self._get_options(row)

            table_index_model = TableIndexModel(
                index=row,
                sample_scatter=_cell(SAMPLE_SCATTER_INDEX),
                sample_scatter_period=_cell(SAMPLE_SCATTER_PERIOD_INDEX),
                sample_transmission=_cell(SAMPLE_TRANSMISSION_INDEX),
                sample_transmission_period=_cell(SAMPLE_TRANSMISSION_PERIOD_INDEX),
                sample_direct=_cell(SAMPLE_DIRECT_INDEX),
                sample_direct_period=_cell(SAMPLE_DIRECT_PERIOD_INDEX),
                can_scatter=_cell(CAN_SCATTER_INDEX),
                can_scatter_period=_cell(CAN_SCATTER_PERIOD_INDEX),
                can_transmission=_cell(CAN_TRANSMISSION_INDEX),
                can_transmission_period=_cell(CAN_TRANSMISSION_PERIOD_INDEX),
                can_direct=_cell(CAN_DIRECT_INDEX),
                can_direct_period=_cell(CAN_DIRECT_PERIOD_INDEX),
                output_name=_cell(OUTPUT_NAME_INDEX),
                user_file=_cell(USER_FILE_INDEX),
                options_column_string=options_string)
            table_model.add_table_entry(row, table_index_model)
        return table_model

    def _create_states(self, state_model, table_model, row_index=None):
        """
        Here we create the states based on the settings in the models
        :param state_model: the state model object
        :param table_model: the table model object
        :param row_index: the selected row, if None then all rows are generated
        """
        number_of_rows = self._view.get_number_of_rows()
        if row_index is not None:
            # A single selected row must lie inside the table.
            if row_index >= number_of_rows:
                return None
            rows = [row_index]
        else:
            rows = range(number_of_rows)

        states = {}
        # Director built once for rows without a row-specific user file.
        gui_state_director = GuiStateDirector(table_model, state_model,
                                              self._facility)
        for row in rows:
            self.sans_logger.information(
                "Generating state for row {}".format(row))
            if self.is_empty_row(row):
                continue

            row_user_file = table_model.get_row_user_file(row)
            if not row_user_file:
                self._create_row_state(gui_state_director, states, row)
                continue

            # A per-row user file overrides the global one: load it and build a
            # dedicated state model / director for this row.
            user_file_path = FileFinder.getFullPath(row_user_file)
            if not os.path.exists(user_file_path):
                raise RuntimeError(
                    "The user path {} does not exist. Make sure a valid user file path"
                    " has been specified.".format(user_file_path))

            user_file_items = UserFileReader(user_file_path).read_user_file()
            row_state_model = StateGuiModel(user_file_items)
            row_director = GuiStateDirector(table_model, row_state_model,
                                            self._facility)
            self._create_row_state(row_director, states, row)
        return states

    def _create_row_state(self, director, states, row):
        """
        Create the state for a single row and store it in ``states``.

        :param director: a GuiStateDirector used to build the state
        :param states: dict of row index -> state, updated in place
        :param row: the row index
        :raises RuntimeError: if the state for the row could not be built
        """
        try:
            states[row] = director.create_state(row)
        except (ValueError, RuntimeError) as e:
            # Chain the original exception so its traceback and root cause
            # are preserved for debugging.
            raise RuntimeError(
                "There was a bad entry for row {}. Ensure that the path to your files has "
                "been added to the Mantid search directories! See here for more "
                "details: {}".format(row, str(e))) from e

    def _populate_row_in_table(self, row):
        """
        Adds a row to the table.

        :param row: a parsed batch-file row, i.e. a mapping from
                    BatchReductionEntry tags to values
        """
        def get_string_entry(_tag, _row):
            # Missing tags map to an empty string.
            _element = ""
            if _tag in _row:
                _element = _row[_tag]
            return _element

        def get_string_period(_tag):
            # ALL_PERIODS acts as the "no specific period" sentinel.
            return "" if _tag == ALL_PERIODS else str(_tag)

        # 1. Pull out the entries
        sample_scatter = get_string_entry(BatchReductionEntry.SampleScatter,
                                          row)
        sample_scatter_period = get_string_entry(
            BatchReductionEntry.SampleScatterPeriod, row)
        sample_transmission = get_string_entry(
            BatchReductionEntry.SampleTransmission, row)
        sample_transmission_period = get_string_entry(
            BatchReductionEntry.SampleTransmissionPeriod, row)
        sample_direct = get_string_entry(BatchReductionEntry.SampleDirect, row)
        sample_direct_period = get_string_entry(
            BatchReductionEntry.SampleDirectPeriod, row)
        can_scatter = get_string_entry(BatchReductionEntry.CanScatter, row)
        can_scatter_period = get_string_entry(
            BatchReductionEntry.CanScatterPeriod, row)
        can_transmission = get_string_entry(
            BatchReductionEntry.CanTransmission, row)
        # Bug fix: this previously read CanScatterPeriod (copy-paste error),
        # so the can transmission period was silently wrong.
        can_transmission_period = get_string_entry(
            BatchReductionEntry.CanTransmissionPeriod, row)
        can_direct = get_string_entry(BatchReductionEntry.CanDirect, row)
        can_direct_period = get_string_entry(
            BatchReductionEntry.CanDirectPeriod, row)
        output_name = get_string_entry(BatchReductionEntry.Output, row)

        # 2. Create entry that can be understood by table
        row_entry = "SampleScatter:{},ssp:{},SampleTrans:{},stp:{},SampleDirect:{},sdp:{}," \
                    "CanScatter:{},csp:{},CanTrans:{},ctp:{}," \
                    "CanDirect:{},cdp:{},OutputName:{}".format(sample_scatter,
                                                               get_string_period(sample_scatter_period),
                                                               sample_transmission,
                                                               get_string_period(sample_transmission_period),
                                                               sample_direct,
                                                               get_string_period(sample_direct_period),
                                                               can_scatter,
                                                               get_string_period(can_scatter_period),
                                                               can_transmission,
                                                               get_string_period(can_transmission_period),
                                                               can_direct,
                                                               get_string_period(can_direct_period),
                                                               output_name)

        self._view.add_row(row_entry)

    # ------------------------------------------------------------------------------------------------------------------
    # Settings
    # ------------------------------------------------------------------------------------------------------------------
    def _setup_instrument_specific_settings(self):
        """
        Configure instrument-dependent view settings based on the first
        sample-scatter cell of the table.
        """
        # The first run number of the scatter data for the first table row.
        first_scatter = self._view.get_cell(row=0, column=0, convert_to=str)
        if not first_scatter:
            # Nothing entered yet, so there is nothing to configure.
            return

        # Try to obtain file information for that run.
        file_information_factory = SANSFileInformationFactory()
        try:
            self._file_information = file_information_factory.create_sans_file_information(
                first_scatter)
        except NotImplementedError:
            self.sans_logger.warning(
                "Could not get file information from {}.".format(
                    first_scatter))
            self._file_information = None

        # Apply the instrument-specific settings to the view.
        if self._file_information:
            instrument = self._file_information.get_instrument()
            self._view.set_instrument_settings(instrument)
            reduction_modes = get_reduction_mode_strings_for_gui(
                instrument=instrument)
        else:
            # Fall back to the "no instrument" defaults.
            self._view.set_instrument_settings(SANSInstrument.NoInstrument)
            reduction_modes = get_reduction_mode_strings_for_gui()
        self._view.set_reduction_modes(reduction_modes)

    # ------------------------------------------------------------------------------------------------------------------
    # Setting workaround for state in DataProcessorWidget
    # ------------------------------------------------------------------------------------------------------------------
    def _remove_dummy_workspaces_and_row_index(self):
        """Strip the InputWorkspace/RowIndex workaround entries from the
        hidden options of every row."""
        for row_index in range(self._view.get_number_of_rows()):
            for key in ("InputWorkspace", "RowIndex"):
                self._remove_from_hidden_options(row_index, key)

    def _set_indices(self):
        """Record each row's index in its hidden options; empty rows get
        Property.EMPTY_INT as a placeholder."""
        for row_index in range(self._view.get_number_of_rows()):
            value = Property.EMPTY_INT if self.is_empty_row(row_index) else row_index
            self._add_to_hidden_options(row_index, "RowIndex", value)

    def _set_dummy_workspace(self):
        """Point every row's InputWorkspace at the shared dummy workspace."""
        for row_index in range(self._view.get_number_of_rows()):
            self._add_to_hidden_options(
                row_index, "InputWorkspace",
                SANS_DUMMY_INPUT_ALGORITHM_PROPERTY_NAME)

    @staticmethod
    def _create_dummy_input_workspace():
        """Create the dummy input workspace in the ADS if it is missing."""
        if AnalysisDataService.doesExist(
                SANS_DUMMY_INPUT_ALGORITHM_PROPERTY_NAME):
            return
        dummy = WorkspaceFactory.create("Workspace2D", 1, 1, 1)
        AnalysisDataService.addOrReplace(
            SANS_DUMMY_INPUT_ALGORITHM_PROPERTY_NAME, dummy)

    @staticmethod
    def _delete_dummy_input_workspace():
        """Remove the dummy input workspace from the ADS, if present."""
        if not AnalysisDataService.doesExist(
                SANS_DUMMY_INPUT_ALGORITHM_PROPERTY_NAME):
            return
        AnalysisDataService.remove(SANS_DUMMY_INPUT_ALGORITHM_PROPERTY_NAME)
# --- 示例#17 (example-separator artifact from the source collection; not executable code) ---
class RunTabPresenter(object):
    class ConcreteRunTabListener(SANSDataProcessorGui.RunTabListener):
        """
        Listener that forwards every run-tab view event to the presenter.

        Each callback is a one-line delegation; the presenter owns all of
        the actual handling logic.
        """
        def __init__(self, presenter):
            super(RunTabPresenter.ConcreteRunTabListener, self).__init__()
            self._presenter = presenter

        def on_user_file_load(self):
            self._presenter.on_user_file_load()

        def on_mask_file_add(self):
            self._presenter.on_mask_file_add()

        def on_batch_file_load(self):
            self._presenter.on_batch_file_load()

        def on_processed_clicked(self):
            self._presenter.on_processed_clicked()

        def on_multi_period_selection(self, show_periods):
            # View event name differs from the presenter handler name.
            self._presenter.on_multiperiod_changed(show_periods)

        def on_data_changed(self, row, column, new_value, old_value):
            self._presenter.on_data_changed(row, column, new_value, old_value)

        def on_manage_directories(self):
            self._presenter.on_manage_directories()

        def on_instrument_changed(self):
            self._presenter.on_instrument_changed()

        def on_row_inserted(self, index, row):
            self._presenter.on_row_inserted(index, row)

        def on_rows_removed(self, rows):
            self._presenter.on_rows_removed(rows)

        def on_copy_rows_requested(self):
            self._presenter.on_copy_rows_requested()

        def on_paste_rows_requested(self):
            self._presenter.on_paste_rows_requested()

        def on_insert_row(self):
            self._presenter.on_insert_row()

        def on_erase_rows(self):
            self._presenter.on_erase_rows()

        def on_cut_rows(self):
            # Note: the presenter's handler is named on_cut_rows_requested.
            self._presenter.on_cut_rows_requested()

    class ProcessListener(WorkHandler.WorkListener):
        """
        Listener relaying batch-processing completion/error callbacks from
        the work handler back to the presenter.
        """
        def __init__(self, presenter):
            super(RunTabPresenter.ProcessListener, self).__init__()
            self._presenter = presenter

        def on_processing_finished(self, result):
            self._presenter.on_processing_finished(result)

        def on_processing_error(self, error):
            # NOTE(review): the presenter's on_processing_error takes
            # (row, error_msg); this single-argument forward would raise a
            # TypeError if invoked — confirm whether this path is used.
            self._presenter.on_processing_error(error)

    def __init__(self, facility, view=None):
        """
        :param facility: the SANS facility the presenter operates for
        :param view: the SANSDataProcessorGui view; may be None and supplied
                     later via set_view
        """
        super(RunTabPresenter, self).__init__()
        self._facility = facility
        # Logger
        self.sans_logger = Logger("SANS")
        # Name of graph to output to
        self.output_graph = 'SANS-Latest'
        self.progress = 0

        # Models that are being used by the presenter
        self._state_model = None
        self._table_model = TableModel()

        # Presenter needs to have a handle on the view since it delegates it
        self._view = None
        self._processing = False
        self.work_handler = WorkHandler()
        self.batch_process_runner = BatchProcessRunner(
            self.notify_progress, self.on_processing_finished,
            self.on_processing_error)

        # File information for the first input
        self._file_information = None
        self._clipboard = []

        # Settings diagnostic tab presenter
        self._settings_diagnostic_tab_presenter = SettingsDiagnosticPresenter(
            self)

        # Masking table presenter
        self._masking_table_presenter = MaskingTablePresenter(self)

        # Beam centre presenter
        self._beam_centre_presenter = BeamCentrePresenter(
            self, WorkHandler, BeamCentreModel, SANSCentreFinder)

        # Workspace Diagnostic page presenter
        self._workspace_diagnostic_presenter = DiagnosticsPagePresenter(
            self, WorkHandler, run_integral, create_state, self._facility)

        # Bug fix: set the view last. set_view wires up the sub-presenters
        # created above, so calling it earlier raised AttributeError whenever
        # a non-None view was passed to the constructor.
        self.set_view(view)

    def _default_gui_setup(self):
        """
        Provides a default setup of the GUI. This is important for the initial start up, when the view is being set.
        """
        # Reduction modes
        self._view.set_reduction_modes(get_reduction_mode_strings_for_gui())

        # Instruments
        self._view.set_instruments(get_instrument_strings_for_gui())

        # Step type options for the wavelength binning
        self._view.wavelength_step_type = [
            RangeStepType.to_string(step)
            for step in (RangeStepType.Lin, RangeStepType.Log,
                         RangeStepType.RangeLog, RangeStepType.RangeLin)
        ]

        # Geometry options, including reading the sample shape from file.
        self._view.sample_shape = [
            "Read from file", SampleShape.Cylinder, SampleShape.FlatPlate,
            SampleShape.Disc
        ]

        # Q range step types
        self._view.q_1d_step_type = [
            RangeStepType.to_string(RangeStepType.Lin),
            RangeStepType.to_string(RangeStepType.Log)
        ]
        self._view.q_xy_step_type = [
            RangeStepType.to_string(RangeStepType.Lin)
        ]

        # Transmission fit options; sample and can share the same list.
        fit_type_strings = [
            FitType.to_string(fit)
            for fit in (FitType.Linear, FitType.Logarithmic,
                        FitType.Polynomial)
        ]
        self._view.transmission_sample_fit_type = fit_type_strings
        self._view.transmission_can_fit_type = fit_type_strings

    # ------------------------------------------------------------------------------------------------------------------
    # Table + Actions
    # ------------------------------------------------------------------------------------------------------------------
    def set_view(self, view):
        """
        Sets the view
        :param view: the view is the SANSDataProcessorGui. The presenter needs to access some of the API
        """
        if view is None:
            return
        self._view = view

        # Subscribe to the view's events.
        self._view.add_listener(RunTabPresenter.ConcreteRunTabListener(self))

        # Default gui setup
        self._default_gui_setup()

        # Hand the relevant sub-views over to the sub-presenters.
        self._settings_diagnostic_tab_presenter.set_view(
            self._view.settings_diagnostic_tab)
        self._masking_table_presenter.set_view(self._view.masking_table)
        self._beam_centre_presenter.set_view(self._view.beam_centre)
        self._workspace_diagnostic_presenter.set_view(
            self._view.diagnostic_page, self._view.instrument)

        self._view.setup_layout()
        # Column 15 carries the options string; give it hinting support.
        self._view.set_hinting_line_edit_for_column(
            15, self._table_model.get_options_hint_strategy())

    def on_user_file_load(self):
        """
        Loads the user file. Populates the models and the view.
        """
        try:
            # Resolve the user file currently selected on the view.
            user_file_path = self._view.get_user_file_path()
            if not user_file_path:
                return
            user_file_path = FileFinder.getFullPath(user_file_path)
            if not os.path.exists(user_file_path):
                raise RuntimeError(
                    "The user path {} does not exist. Make sure a valid user file path"
                    " has been specified.".format(user_file_path))
            self._table_model.user_file = user_file_path

            # Clear out the current view
            self._view.reset_all_fields_to_default()

            # Parse the user file and rebuild the state model from it.
            user_file_items = UserFileReader(user_file_path).read_user_file()
            self._state_model = StateGuiModel(user_file_items)

            # Push the new state out to the view and the sub-presenters.
            self._update_view_from_state_model()
            self._beam_centre_presenter.update_centre_positions(
                self._state_model)
            self._beam_centre_presenter.on_update_rows()
            self._masking_table_presenter.on_update_rows()
            self._workspace_diagnostic_presenter.on_user_file_load(
                user_file_path)
        except Exception as e:
            # Boundary handler: surface any failure to the user and log it.
            self.sans_logger.error(
                "Loading of the user file failed. {}".format(str(e)))
            self.display_warning_box('Warning',
                                     'Loading of the user file failed.',
                                     str(e))

    def on_batch_file_load(self):
        """
        Loads a batch file and populates the batch table based on that.
        """
        try:
            # Resolve the batch file currently selected on the view.
            batch_file_path = self._view.get_batch_file_path()
            if not batch_file_path:
                return
            if not os.path.exists(batch_file_path):
                raise RuntimeError(
                    "The batch file path {} does not exist. Make sure a valid batch file path"
                    " has been specified.".format(batch_file_path))
            self._table_model.batch_file = batch_file_path

            # Parse the batch file into row dictionaries.
            parsed_rows = BatchCsvParser(batch_file_path).parse_batch_file()

            # Rebuild the table model from the parsed rows.
            self._table_model.clear_table_entries()
            for index, parsed_row in enumerate(parsed_rows):
                self._add_row_to_table_model(parsed_row, index)
            # Presumably drops a leftover placeholder entry beyond the parsed
            # rows — kept exactly as in the original workaround.
            self._table_model.remove_table_entries([len(parsed_rows)])

            self.update_view_from_table_model()

            self._beam_centre_presenter.on_update_rows()
            self._masking_table_presenter.on_update_rows()
        except RuntimeError as e:
            self.sans_logger.error(
                "Loading of the batch file failed. {}".format(str(e)))
            self.display_warning_box('Warning',
                                     'Loading of the batch file failed',
                                     str(e))

    def _add_row_to_table_model(self, row, index):
        """
        Adds a row to the table model.

        :param row: a parsed batch-file row, i.e. a mapping from
                    BatchReductionEntry tags to values
        :param index: the table index at which to insert the new entry
        """
        def get_string_entry(_tag, _row):
            # Missing tags map to an empty string.
            _element = ""
            if _tag in _row:
                _element = _row[_tag]
            return _element

        def get_string_period(_tag):
            # ALL_PERIODS acts as the "no specific period" sentinel.
            return "" if _tag == ALL_PERIODS else str(_tag)

        # 1. Pull out the entries
        sample_scatter = get_string_entry(BatchReductionEntry.SampleScatter,
                                          row)
        sample_scatter_period = get_string_period(
            get_string_entry(BatchReductionEntry.SampleScatterPeriod, row))
        sample_transmission = get_string_entry(
            BatchReductionEntry.SampleTransmission, row)
        sample_transmission_period = \
            get_string_period(get_string_entry(BatchReductionEntry.SampleTransmissionPeriod, row))
        sample_direct = get_string_entry(BatchReductionEntry.SampleDirect, row)
        sample_direct_period = get_string_period(
            get_string_entry(BatchReductionEntry.SampleDirectPeriod, row))
        can_scatter = get_string_entry(BatchReductionEntry.CanScatter, row)
        can_scatter_period = get_string_period(
            get_string_entry(BatchReductionEntry.CanScatterPeriod, row))
        can_transmission = get_string_entry(
            BatchReductionEntry.CanTransmission, row)
        # Bug fix: this previously read CanScatterPeriod (copy-paste error),
        # so the can transmission period was silently wrong.
        can_transmission_period = get_string_period(
            get_string_entry(BatchReductionEntry.CanTransmissionPeriod, row))
        can_direct = get_string_entry(BatchReductionEntry.CanDirect, row)
        can_direct_period = get_string_period(
            get_string_entry(BatchReductionEntry.CanDirectPeriod, row))
        output_name = get_string_entry(BatchReductionEntry.Output, row)

        # Look up the file information to obtain the sample thickness.
        file_information_factory = SANSFileInformationFactory()
        file_information = file_information_factory.create_sans_file_information(
            sample_scatter)
        # NOTE(review): reads the private _thickness attribute; prefer a
        # public accessor if one exists.
        sample_thickness = file_information._thickness
        user_file = get_string_entry(BatchReductionEntry.UserFile, row)

        row_entry = [
            sample_scatter, sample_scatter_period, sample_transmission,
            sample_transmission_period, sample_direct, sample_direct_period,
            can_scatter, can_scatter_period, can_transmission,
            can_transmission_period, can_direct, can_direct_period,
            output_name, user_file, sample_thickness, ''
        ]

        table_index_model = TableIndexModel(*row_entry)

        self._table_model.add_table_entry(index, table_index_model)

    def update_view_from_table_model(self):
        """Synchronise the table view with the table model contents."""
        self._view.clear_table()
        self._view.hide_period_columns()
        for row_index, entry in enumerate(self._table_model._table_entries):
            self._view.add_row([str(value) for value in entry.to_list()])
            # row_index + 1: presumably offsets past the initial blank view
            # row removed below — TODO confirm against the view API.
            self._view.change_row_color(
                row_state_to_colour_mapping[entry.row_state], row_index + 1)
            self._view.set_row_tooltip(entry.tool_tip, row_index + 1)
            if entry.isMultiPeriod():
                self._view.show_period_columns()
        self._view.remove_rows([0])
        self._view.clear_selection()

    def on_data_changed(self, row, column, new_value, old_value):
        # Persist the edited cell, then mark the row as unprocessed since its
        # previous reduction result (if any) is now stale.
        self._table_model.update_table_entry(row, column, new_value)
        self._view.change_row_color(
            row_state_to_colour_mapping[RowState.Unprocessed], row)
        self._view.set_row_tooltip('', row)
        # Keep the dependent sub-presenters in sync with the new table data.
        self._beam_centre_presenter.on_update_rows()
        self._masking_table_presenter.on_update_rows()

    def on_instrument_changed(self):
        # Re-apply the instrument specific settings (reduction modes etc.)
        # whenever the instrument selection changes on the view.
        self._setup_instrument_specific_settings()

    def on_processed_clicked(self):
        """
        Prepares the batch reduction.

        0. Validate rows and create dummy workspace if it does not exist
        1. Sets up the states
        2. Adds a dummy input workspace
        3. Adds row index information
        """
        try:
            self._view.disable_buttons()
            self._processing = True
            self.sans_logger.information("Starting processing of batch table.")

            # Determine which rows to process: the selection, or every row.
            selected_rows = self._view.get_selected_rows()
            if not selected_rows:
                selected_rows = range(self._table_model.get_number_of_rows())
            for row in selected_rows:
                self._table_model.reset_row_state(row)
            self.update_view_from_table_model()

            # Build the states and report any per-row errors.
            states, errors = self.get_states(row_index=selected_rows)
            for row, error in errors.items():
                self.on_processing_error(row, error)
            if not states:
                self.on_processing_finished(None)
                return

            # Create the graph if continuous output is specified.
            if mantidplot:
                if self._view.plot_results and not mantidplot.graph(
                        self.output_graph):
                    mantidplot.newGraph(self.output_graph)

            # Collect the processing options from the view.
            use_optimizations = self._view.use_optimizations
            output_mode = self._view.output_mode
            plot_results = self._view.plot_results

            self.progress = 0
            self._view.progress_bar_value = self.progress
            self._view.progress_bar_maximum = len(states)
            self.batch_process_runner.process_states(states, use_optimizations,
                                                     output_mode, plot_results,
                                                     self.output_graph)

        except Exception as e:
            # Boundary handler: restore the UI and surface the failure.
            self._view.enable_buttons()
            self.sans_logger.error("Process halted due to: {}".format(str(e)))
            self.display_warning_box('Warning', 'Process halted', str(e))

    def on_multiperiod_changed(self, show_periods):
        """Show or hide the period columns of the table."""
        toggle = (self._view.show_period_columns
                  if show_periods else self._view.hide_period_columns)
        toggle()

    def display_warning_box(self, title, text, detailed_text):
        # Thin wrapper around the view's message box.
        self._view.display_message_box(title, text, detailed_text)

    def notify_progress(self, row):
        """Advance the progress bar and mark ``row`` as processed."""
        self.increment_progress()
        # An empty message clears any previous error text for the row.
        self._table_model.set_row_to_processed(row, '')
        self.update_view_from_table_model()

    def on_processing_finished(self, result):
        # Re-enable the UI once the batch reduction has completed.
        # ``result`` is unused; the parameter mirrors the ProcessListener
        # callback signature.
        self._view.enable_buttons()
        self._processing = False

    def on_processing_error(self, row, error_msg):
        # A failed row still advances the progress bar so it can complete.
        self.increment_progress()
        self._table_model.set_row_to_error(row, error_msg)
        self.update_view_from_table_model()

    def increment_progress(self):
        """Advance the progress counter and reflect it on the view."""
        self.progress += 1
        # Direct attribute assignment; setattr with a constant attribute
        # name was an unnecessary indirection.
        self._view.progress_bar_value = self.progress

    def on_row_inserted(self, index, row):
        # ``row`` is a plain list of cell values; convert it to a model entry
        # before inserting it at ``index``.
        row_table_index = TableIndexModel(*row)
        self._table_model.add_table_entry(index, row_table_index)

    def on_insert_row(self):
        """Insert an empty row below the first selected row, or append one
        at the end of the table when nothing is selected."""
        selected_rows = self._view.get_selected_rows()
        if selected_rows:
            insert_at = selected_rows[0] + 1
        else:
            insert_at = self._table_model.get_number_of_rows()
        self._table_model.add_table_entry(
            insert_at, self._table_model.create_empty_row())
        self.update_view_from_table_model()

    def on_erase_rows(self):
        """Replace each selected row with a fresh empty row."""
        selected_rows = self._view.get_selected_rows()
        for row in selected_rows:
            # Create a fresh empty row per replacement so rows do not end up
            # sharing one mutable TableIndexModel instance; also call the
            # helper on the instance, consistent with on_insert_row.
            empty_row = self._table_model.create_empty_row()
            self._table_model.replace_table_entries([row], [empty_row])
        self.update_view_from_table_model()

    def on_rows_removed(self, rows):
        # Drop the given rows from the model and refresh the view.
        self._table_model.remove_table_entries(rows)
        self.update_view_from_table_model()

    def on_copy_rows_requested(self):
        """Copy the currently selected rows into the internal clipboard."""
        self._clipboard = [
            self._table_model.get_table_entry(row).to_list()
            for row in self._view.get_selected_rows()
        ]

    def on_cut_rows_requested(self):
        # A cut is a copy followed by removal of the selected rows.
        self.on_copy_rows_requested()
        rows = self._view.get_selected_rows()
        self.on_rows_removed(rows)

    def on_paste_rows_requested(self):
        """Paste the clipboard rows over the selection, or append them at
        the end of the table when nothing is selected."""
        if not self._clipboard:
            return
        target_rows = self._view.get_selected_rows()
        if not target_rows:
            target_rows = [self._table_model.get_number_of_rows()]
        replacements = [TableIndexModel(*entry) for entry in self._clipboard]
        self._table_model.replace_table_entries(target_rows, replacements)
        self.update_view_from_table_model()

    def on_manage_directories(self):
        # Delegate to the view, which owns the directory-manager dialog.
        self._view.show_directory_manager()

    def get_row_indices(self):
        """
        Gets the indices of rows which are not empty.
        :return: a list of row indices.
        """
        number_of_rows = self._table_model.get_number_of_rows()
        return [row for row in range(number_of_rows)
                if not self.is_empty_row(row)]

    def on_mask_file_add(self):
        """
        Appends a newly selected mask file to the state model's mask list.
        """
        new_mask_file = self._view.get_mask_file()
        if not new_mask_file:
            return
        # Only accept files that Mantid can actually resolve on disk.
        if not FileFinder.getFullPath(new_mask_file):
            return

        # Store the originally entered name (not the resolved full path).
        mask_files = self._state_model.mask_files
        mask_files.append(new_mask_file)
        self._state_model.mask_files = mask_files

        # Make sure that the sub-presenters are up to date with this change
        self._masking_table_presenter.on_update_rows()
        self._settings_diagnostic_tab_presenter.on_update_rows()
        self._beam_centre_presenter.on_update_rows()

    def is_empty_row(self, row):
        """
        Checks if a row has no entries. These rows will be ignored.
        :param row: the row index
        :return: True if the row is empty.
        """
        # The check itself is owned by the table model.
        return self._table_model.is_empty_row(row)

    # def _validate_rows(self):
    #     """
    #     Validation of the rows. A minimal setup requires that ScatterSample is set.
    #     """
    #     # If SampleScatter is empty, then don't run the reduction.
    #     # We allow empty rows for now, since we cannot remove them from Python.
    #     number_of_rows = self._table_model.get_number_of_rows()
    #     for row in range(number_of_rows):
    #         if not self.is_empty_row(row):
    #             sample_scatter = self._view.get_cell(row, 0)
    #             if not sample_scatter:
    #                 raise RuntimeError("Row {} has not SampleScatter specified. Please correct this.".format(row))

    # ------------------------------------------------------------------------------------------------------------------
    # Controls
    # ------------------------------------------------------------------------------------------------------------------
    def disable_controls(self):
        """
        Disable all input fields and buttons during the execution of the reduction.
        """
        # Intentionally a no-op for now; kept so callers have a stable hook.
        # TODO: think about enabling and disable some controls during reduction
        pass

    def enable_controls(self):
        """
        Enable all input fields and buttons after the execution has completed.
        """
        # Intentionally a no-op for now; kept so callers have a stable hook.
        # TODO: think about enabling and disable some controls during reduction
        pass

    # ------------------------------------------------------------------------------------------------------------------
    # Table Model and state population
    # ------------------------------------------------------------------------------------------------------------------
    def get_states(self, row_index=None, file_lookup=True):
        """
        Gathers the state information for all rows.
        :param row_index: if a single row is selected, then only this row is returned, else all the state for all
                             rows is returned
        :param file_lookup: whether file information should be looked up when building the states
        :return: a (states, errors) pair; both are None when a model is unavailable
        """
        generation_started = time.time()

        # Refresh the state model with the current view settings.
        state_model_with_view_update = self._get_state_model_with_view_update()
        table_model = self._table_model

        if table_model and state_model_with_view_update:
            # Construct one state object per requested row.
            states, errors = create_states(state_model_with_view_update,
                                           table_model,
                                           self._view.instrument,
                                           self._facility,
                                           row_index=row_index,
                                           file_lookup=file_lookup)
        else:
            states, errors = None, None

        self.sans_logger.information(
            "The generation of all states took {}s".format(
                time.time() - generation_started))
        return states, errors

    def get_state_for_row(self, row_index, file_lookup=True):
        """
        Creates the state for a particular row.
        :param row_index: the row index
        :param file_lookup: whether the file system should be queried while building the state
        :return: a state if the index is valid and there is a state else None
        """
        states, errors = self.get_states(row_index=[row_index],
                                         file_lookup=file_lookup)
        if states is None:
            self.sans_logger.warning(
                "There does not seem to be data for a row {}.".format(
                    row_index))
            return None

        # dict.get replaces the previous redundant list(states.keys()) copy and
        # the nested truthiness check; behaviour is identical.
        return states.get(row_index)

    def _update_view_from_state_model(self):
        # Front tab view
        self._set_on_view("zero_error_free")
        self._set_on_view("save_types")
        self._set_on_view("compatibility_mode")

        self._set_on_view("merge_scale")
        self._set_on_view("merge_shift")
        self._set_on_view("merge_scale_fit")
        self._set_on_view("merge_shift_fit")
        self._set_on_view("merge_q_range_start")
        self._set_on_view("merge_q_range_stop")
        self._set_on_view("merge_max")
        self._set_on_view("merge_min")

        # Settings tab view
        self._set_on_view("reduction_dimensionality")
        self._set_on_view("reduction_mode")
        self._set_on_view("event_slices")
        self._set_on_view("event_binning")
        self._set_on_view("merge_mask")

        self._set_on_view("wavelength_step_type")
        self._set_on_view("wavelength_min")
        self._set_on_view("wavelength_max")
        self._set_on_view("wavelength_step")

        self._set_on_view("absolute_scale")
        self._set_on_view("sample_shape")
        self._set_on_view("sample_height")
        self._set_on_view("sample_width")
        self._set_on_view("sample_thickness")
        self._set_on_view("z_offset")

        # Adjustment tab
        self._set_on_view("normalization_incident_monitor")
        self._set_on_view("normalization_interpolate")

        self._set_on_view("transmission_incident_monitor")
        self._set_on_view("transmission_interpolate")
        self._set_on_view("transmission_roi_files")
        self._set_on_view("transmission_mask_files")
        self._set_on_view("transmission_radius")
        self._set_on_view("transmission_monitor")
        self._set_on_view("transmission_mn_shift")
        self._set_on_view("show_transmission")

        self._set_on_view_transmission_fit()

        self._set_on_view("pixel_adjustment_det_1")
        self._set_on_view("pixel_adjustment_det_2")
        self._set_on_view("wavelength_adjustment_det_1")
        self._set_on_view("wavelength_adjustment_det_2")

        # Q tab
        self._set_on_view_q_rebin_string()
        self._set_on_view("q_xy_max")
        self._set_on_view("q_xy_step")
        self._set_on_view("q_xy_step_type")

        self._set_on_view("gravity_on_off")
        self._set_on_view("gravity_extra_length")

        self._set_on_view("use_q_resolution")
        self._set_on_view_q_resolution_aperture()
        self._set_on_view("q_resolution_delta_r")
        self._set_on_view("q_resolution_collimation_length")
        self._set_on_view("q_resolution_moderator_file")

        self._set_on_view("r_cut")
        self._set_on_view("w_cut")

        # Mask
        self._set_on_view("phi_limit_min")
        self._set_on_view("phi_limit_max")
        self._set_on_view("phi_limit_use_mirror")
        self._set_on_view("radius_limit_min")
        self._set_on_view("radius_limit_max")

    def _set_on_view_transmission_fit_sample_settings(self):
        """Apply the sample transmission-fit settings from the model to the view."""
        sample_fit = self._state_model.transmission_sample_fit_type

        # The "use fit" flag is on for anything except an explicit NoFit.
        self._view.transmission_sample_use_fit = sample_fit is not FitType.NoFit

        # Polynomial order is only meaningful for a polynomial fit; default to 2.
        if sample_fit is FitType.Polynomial:
            self._view.transmission_sample_polynomial_order = \
                self._state_model.transmission_sample_polynomial_order
        else:
            self._view.transmission_sample_polynomial_order = 2

        # Show a sensible fit type on the view even when fitting is disabled.
        self._view.transmission_sample_fit_type = \
            sample_fit if sample_fit is not FitType.NoFit else FitType.Linear

        # Only propagate the wavelength range when both bounds are set.
        lower = self._state_model.transmission_sample_wavelength_min
        upper = self._state_model.transmission_sample_wavelength_max
        if lower and upper:
            self._view.transmission_sample_use_wavelength = True
            self._view.transmission_sample_wavelength_min = lower
            self._view.transmission_sample_wavelength_max = upper

    def _set_on_view_transmission_fit(self):
        # Steps for adding the transmission fit to the view
        # 1. Check if individual settings exist. If so then set the view to separate, else set them to both
        # 2. Apply the settings
        separate_settings = self._state_model.has_transmission_fit_got_separate_settings_for_sample_and_can()
        self._view.set_fit_selection(use_separate=separate_settings)

        if separate_settings:
            self._set_on_view_transmission_fit_sample_settings()

            # Set transmission_can_use_fit. transmission_can_fit_type is an
            # attribute, not a callable -- mirroring the sample settings above
            # (the previous code erroneously called it).
            fit_type_can = self._state_model.transmission_can_fit_type
            # Fitting is in use for any fit type other than NoFit (the previous
            # comparison was inverted relative to the sample-side logic).
            use_can_fit = fit_type_can is not FitType.NoFit
            self._view.transmission_can_use_fit = use_can_fit

            # Set the polynomial order for can
            polynomial_order_can = self._state_model.transmission_can_polynomial_order if fit_type_can is FitType.Polynomial else 2  # noqa
            self._view.transmission_can_polynomial_order = polynomial_order_can

            # Set the fit type for the can on the view (previously this was
            # mistakenly assigned to the presenter itself).
            fit_type_can = fit_type_can if fit_type_can is not FitType.NoFit else FitType.Linear
            self._view.transmission_can_fit_type = fit_type_can

            # Set the wavelength
            wavelength_min = self._state_model.transmission_can_wavelength_min
            wavelength_max = self._state_model.transmission_can_wavelength_max
            if wavelength_min and wavelength_max:
                self._view.transmission_can_use_wavelength = True
                self._view.transmission_can_wavelength_min = wavelength_min
                self._view.transmission_can_wavelength_max = wavelength_max
        else:
            self._set_on_view_transmission_fit_sample_settings()

    def _set_on_view_q_resolution_aperture(self):
        self._set_on_view("q_resolution_source_a")
        self._set_on_view("q_resolution_sample_a")
        self._set_on_view("q_resolution_source_h")
        self._set_on_view("q_resolution_sample_h")
        self._set_on_view("q_resolution_source_w")
        self._set_on_view("q_resolution_sample_w")

        # If we have h1, h2, w1, and w2 selected then we want to select the rectangular aperture.
        is_rectangular = self._state_model.q_resolution_source_h and self._state_model.q_resolution_sample_h and \
                         self._state_model.q_resolution_source_w and self._state_model.q_resolution_sample_w  # noqa
        self._view.set_q_resolution_shape_to_rectangular(is_rectangular)

    def _set_on_view_q_rebin_string(self):
        """
        Maps the q_1d_rebin_string of the model to the q_1d_step and q_1d_step_type property of the view.
        """
        rebin_string = self._state_model.q_1d_rebin_string
        # Extract the min, max and step and step type from the rebin string
        elements = rebin_string.split(",")
        # If we have three elements then we want to set only the
        if len(elements) == 3:
            step_element = float(elements[1])
            step = abs(step_element)
            step_type = RangeStepType.Lin if step_element >= 0 else RangeStepType.Log

            # Set on the view
            self._view.q_1d_min_or_rebin_string = float(elements[0])
            self._view.q_1d_max = float(elements[2])
            self._view.q_1d_step = step
            self._view.q_1d_step_type = step_type
        else:
            # Set the rebin string
            self._view.q_1d_min_or_rebin_string = rebin_string
            self._view.q_1d_step_type = self._view.VARIABLE

    def _set_on_view(self, attribute_name):
        attribute = getattr(self._state_model, attribute_name)
        if attribute or isinstance(
                attribute, bool
        ):  # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
            setattr(self._view, attribute_name, attribute)

    def _set_on_view_with_view(self, attribute_name, view):
        attribute = getattr(self._state_model, attribute_name)
        if attribute or isinstance(
                attribute, bool
        ):  # We need to be careful here. We don't want to set empty strings, or None, but we want to set boolean values. # noqa
            setattr(view, attribute_name, attribute)

    def _get_state_model_with_view_update(self):
        """
        Goes through all sub presenters and update the state model based on the views.

        Note that at the moment we have set up the view and the model such that the name of a property must be the same
        in the view and the model. This can be easily changed, but it also provides a good cohesion.
        """
        state_model = copy.deepcopy(self._state_model)

        # If we don't have a state model then return None
        if state_model is None:
            return state_model
        # Run tab view
        self._set_on_state_model("zero_error_free", state_model)
        self._set_on_state_model("save_types", state_model)
        self._set_on_state_model("compatibility_mode", state_model)
        self._set_on_state_model("merge_scale", state_model)
        self._set_on_state_model("merge_shift", state_model)
        self._set_on_state_model("merge_scale_fit", state_model)
        self._set_on_state_model("merge_shift_fit", state_model)
        self._set_on_state_model("merge_q_range_start", state_model)
        self._set_on_state_model("merge_q_range_stop", state_model)
        self._set_on_state_model("merge_mask", state_model)
        self._set_on_state_model("merge_max", state_model)
        self._set_on_state_model("merge_min", state_model)

        # Settings tab
        self._set_on_state_model("reduction_dimensionality", state_model)
        self._set_on_state_model("reduction_mode", state_model)
        self._set_on_state_model("event_slices", state_model)
        self._set_on_state_model("event_binning", state_model)

        self._set_on_state_model("wavelength_step_type", state_model)
        self._set_on_state_model("wavelength_min", state_model)
        self._set_on_state_model("wavelength_max", state_model)
        self._set_on_state_model("wavelength_step", state_model)
        self._set_on_state_model("wavelength_range", state_model)

        self._set_on_state_model("absolute_scale", state_model)
        self._set_on_state_model("sample_shape", state_model)
        self._set_on_state_model("sample_height", state_model)
        self._set_on_state_model("sample_width", state_model)
        self._set_on_state_model("sample_thickness", state_model)
        self._set_on_state_model("z_offset", state_model)

        # Adjustment tab
        self._set_on_state_model("normalization_incident_monitor", state_model)
        self._set_on_state_model("normalization_interpolate", state_model)

        self._set_on_state_model("transmission_incident_monitor", state_model)
        self._set_on_state_model("transmission_interpolate", state_model)
        self._set_on_state_model("transmission_roi_files", state_model)
        self._set_on_state_model("transmission_mask_files", state_model)
        self._set_on_state_model("transmission_radius", state_model)
        self._set_on_state_model("transmission_monitor", state_model)
        self._set_on_state_model("transmission_mn_shift", state_model)
        self._set_on_state_model("show_transmission", state_model)

        self._set_on_state_model_transmission_fit(state_model)

        self._set_on_state_model("pixel_adjustment_det_1", state_model)
        self._set_on_state_model("pixel_adjustment_det_2", state_model)
        self._set_on_state_model("wavelength_adjustment_det_1", state_model)
        self._set_on_state_model("wavelength_adjustment_det_2", state_model)

        # Q tab
        self._set_on_state_model_q_1d_rebin_string(state_model)
        self._set_on_state_model("q_xy_max", state_model)
        self._set_on_state_model("q_xy_step", state_model)
        self._set_on_state_model("q_xy_step_type", state_model)

        self._set_on_state_model("gravity_on_off", state_model)
        self._set_on_state_model("gravity_extra_length", state_model)

        self._set_on_state_model("use_q_resolution", state_model)
        self._set_on_state_model("q_resolution_source_a", state_model)
        self._set_on_state_model("q_resolution_sample_a", state_model)
        self._set_on_state_model("q_resolution_source_h", state_model)
        self._set_on_state_model("q_resolution_sample_h", state_model)
        self._set_on_state_model("q_resolution_source_w", state_model)
        self._set_on_state_model("q_resolution_sample_w", state_model)
        self._set_on_state_model("q_resolution_delta_r", state_model)
        self._set_on_state_model("q_resolution_collimation_length",
                                 state_model)
        self._set_on_state_model("q_resolution_moderator_file", state_model)

        self._set_on_state_model("r_cut", state_model)
        self._set_on_state_model("w_cut", state_model)

        # Mask
        self._set_on_state_model("phi_limit_min", state_model)
        self._set_on_state_model("phi_limit_max", state_model)
        self._set_on_state_model("phi_limit_use_mirror", state_model)
        self._set_on_state_model("radius_limit_min", state_model)
        self._set_on_state_model("radius_limit_max", state_model)

        # Beam Centre
        self._beam_centre_presenter.set_on_state_model("lab_pos_1",
                                                       state_model)
        self._beam_centre_presenter.set_on_state_model("lab_pos_2",
                                                       state_model)

        return state_model

    def _set_on_state_model_transmission_fit(self, state_model):
        """Transfer the transmission-fit settings from the view onto the model."""
        view = self._view
        if view.use_same_transmission_fit_setting_for_sample_and_can():
            # One set of settings drives both the sample and the can.
            use_fit = view.transmission_sample_use_fit
            fit_type = view.transmission_sample_fit_type
            polynomial_order = view.transmission_sample_polynomial_order
            effective_fit = fit_type if use_fit else FitType.NoFit
            state_model.transmission_sample_fit_type = effective_fit
            state_model.transmission_can_fit_type = effective_fit
            state_model.transmission_sample_polynomial_order = polynomial_order
            state_model.transmission_can_polynomial_order = polynomial_order

            # Shared wavelength settings.
            if view.transmission_sample_use_wavelength:
                lower = view.transmission_sample_wavelength_min
                upper = view.transmission_sample_wavelength_max
                state_model.transmission_sample_wavelength_min = lower
                state_model.transmission_sample_wavelength_max = upper
                state_model.transmission_can_wavelength_min = lower
                state_model.transmission_can_wavelength_max = upper
        else:
            # Sample settings.
            use_fit_sample = view.transmission_sample_use_fit
            fit_type_sample = view.transmission_sample_fit_type
            state_model.transmission_sample_fit_type = \
                fit_type_sample if use_fit_sample else FitType.NoFit
            state_model.transmission_sample_polynomial_order = \
                view.transmission_sample_polynomial_order

            # Sample wavelength settings.
            if view.transmission_sample_use_wavelength:
                state_model.transmission_sample_wavelength_min = \
                    view.transmission_sample_wavelength_min
                state_model.transmission_sample_wavelength_max = \
                    view.transmission_sample_wavelength_max

            # Can settings.
            use_fit_can = view.transmission_can_use_fit
            fit_type_can = view.transmission_can_fit_type
            state_model.transmission_can_fit_type = \
                fit_type_can if use_fit_can else FitType.NoFit
            state_model.transmission_can_polynomial_order = \
                view.transmission_can_polynomial_order

            # Can wavelength settings.
            if view.transmission_can_use_wavelength:
                state_model.transmission_can_wavelength_min = \
                    view.transmission_can_wavelength_min
                state_model.transmission_can_wavelength_max = \
                    view.transmission_can_wavelength_max

    def _set_on_state_model_q_1d_rebin_string(self, state_model):
        q_1d_step_type = self._view.q_1d_step_type

        # If we are dealing with a simple rebin string then the step type is None
        if self._view.q_1d_step_type is None:
            state_model.q_1d_rebin_string = self._view.q_1d_min_or_rebin_string
        else:
            q_1d_min = self._view.q_1d_min_or_rebin_string
            q_1d_max = self._view.q_1d_max
            q_1d_step = self._view.q_1d_step
            if q_1d_min and q_1d_max and q_1d_step and q_1d_step_type:
                q_1d_rebin_string = str(q_1d_min) + ","
                q_1d_step_type_factor = -1. if q_1d_step_type is RangeStepType.Log else 1.
                q_1d_rebin_string += str(
                    q_1d_step_type_factor * q_1d_step) + ","
                q_1d_rebin_string += str(q_1d_max)
                state_model.q_1d_rebin_string = q_1d_rebin_string

    def _set_on_state_model(self, attribute_name, state_model):
        attribute = getattr(self._view, attribute_name)
        if attribute is not None and attribute != '':
            setattr(state_model, attribute_name, attribute)

    def get_cell_value(self, row, column):
        """Return the table cell at (row, column-key) converted to a string."""
        column_index = self.table_index[column]
        return self._view.get_cell(row=row,
                                   column=column_index,
                                   convert_to=str)

    # ------------------------------------------------------------------------------------------------------------------
    # Settings
    # ------------------------------------------------------------------------------------------------------------------
    def _setup_instrument_specific_settings(self, instrument=None):
        if not instrument:
            instrument = self._view.instrument

        self._view.set_instrument_settings(instrument)
        self._beam_centre_presenter.on_update_instrument(instrument)
        self._workspace_diagnostic_presenter.set_instrument_settings(
            instrument)
# 示例#18
# 0
class HidraProjectFile(object):
    '''Read and/or write an HB2B project to an HDF5 with entries for detector counts, sample logs, reduced data,
    fitted peaks and etc.
    All the import/export information will be buffered in order to avoid exception during operation

    File structure:
    - experiment
        - scans (raw counts)
        - logs
    - instrument
        - calibration
    - reduced diffraction data
        - main
          - sub-run
          - ...
        - mask_A
          - sub-run
          - ...
        - mask_B
          - sub-run
          - ...
    '''
    def __init__(self, project_file_name, mode=HidraProjectFileMode.READONLY):
        """
        Initialization
        :param project_file_name: project file name
        :param mode: I/O mode; converted to the enum via HidraProjectFileMode.getMode
        :raises RuntimeError: if no file name is supplied
        """
        # configure logging for this class
        self._log = Logger(__name__)

        # convert the mode to the enum
        self._io_mode = HidraProjectFileMode.getMode(mode)

        # check the file
        if not project_file_name:
            raise RuntimeError('Must supply a filename')
        self._file_name = str(project_file_name)  # force it to be a string
        self._checkFileAccess()

        # open the file using h5py; a brand-new (overwrite-mode) file gets the
        # standard group skeleton created immediately
        self._project_h5 = h5py.File(self._file_name, mode=str(self._io_mode))
        if self._io_mode == HidraProjectFileMode.OVERWRITE:
            self._init_project()

    def _checkFileAccess(self):
        '''Verify the file has the correct access permissions and set the value of ``self._is_writable``
        '''
        # the file must already exist for read-only and append (read-write) modes
        check_exist = ((self._io_mode == HidraProjectFileMode.READONLY)
                       or (self._io_mode == HidraProjectFileMode.READWRITE))
        self._is_writable = (not self._io_mode
                             == HidraProjectFileMode.READONLY)

        # create a custom message based on requested access mode
        if self._io_mode == HidraProjectFileMode.READONLY:
            description = 'Read-only project file'
        elif self._io_mode == HidraProjectFileMode.OVERWRITE:
            description = 'Write-only project file'
        elif self._io_mode == HidraProjectFileMode.READWRITE:
            description = 'Append-mode project file'
        else:  # this should never happen
            # report the actual unsupported mode; previously this formatted the
            # HidraProjectFileMode class itself into the message
            raise RuntimeError(
                'Hidra project file I/O mode {} is not supported'.format(
                    self._io_mode))

        # convert the filename to an absolute path so error messages are clearer
        self._file_name = os.path.abspath(self._file_name)

        # do the check
        checkdatatypes.check_file_name(self._file_name,
                                       check_exist,
                                       self._is_writable,
                                       is_dir=False,
                                       description=description)

    def _init_project(self):
        """Create the skeleton group layout for a freshly opened (overwrite-mode) project file."""
        assert self._project_h5 is not None, 'cannot be None'
        assert self._is_writable, 'must be writable'

        # raw experiment data: per-sub-run counts plus the sample logs
        raw_entry = self._project_h5.create_group(HidraConstants.RAW_DATA)
        raw_entry.create_group(HidraConstants.SUB_RUNS)
        raw_entry.create_group(HidraConstants.SAMPLE_LOGS)

        # instrument: calibration, geometry setup and pixel efficiency
        instrument_entry = self._project_h5.create_group(HidraConstants.INSTRUMENT)
        instrument_entry.create_group(HidraConstants.CALIBRATION)
        geometry_entry = instrument_entry.create_group('geometry setup')
        geometry_entry.create_group('detector')
        geometry_entry.create_group('wave length')
        instrument_entry.create_group(HidraConstants.DETECTOR_EFF)

        # masks: detector (pixel) masks and solid-angle masks
        mask_entry = self._project_h5.create_group(HidraConstants.MASK)
        mask_entry.create_group(HidraConstants.DETECTOR_MASK)
        mask_entry.create_group(HidraConstants.SOLID_ANGLE_MASK)

        # fitted peaks
        self._project_h5.create_group('peaks')

        # reduced diffraction data
        self._project_h5.create_group(HidraConstants.REDUCED_DATA)

    def __del__(self):
        # Best-effort cleanup: release the HDF5 handle when the object is
        # garbage collected (close() is safe to call repeatedly).
        self.close()

    @property
    def name(self):
        """
        File name on HDD

        Note: ``h5py.File.name`` is the HDF5 path of the root group (always
        ``'/'``), not the path on disk, so the stored file name is returned
        instead to match this property's documented meaning.
        """
        return self._file_name

    def append_raw_counts(self, sub_run_number, counts_array):
        """Add raw detector counts collected in a single scan/Pt

        Parameters
        ----------
        sub_run_number : int
            sub run number
        counts_array : ~numpy.ndarray
            detector counts
        """
        assert self._project_h5 is not None, 'cannot be None'
        assert self._is_writable, 'must be writable'
        checkdatatypes.check_int_variable('Sub-run index', sub_run_number,
                                          (0, None))

        # one zero-padded group per sub run, holding the flattened counts
        sub_runs_entry = self._project_h5[HidraConstants.RAW_DATA][
            HidraConstants.SUB_RUNS]
        sub_run_group = sub_runs_entry.create_group(
            '{:04}'.format(sub_run_number))
        sub_run_group.create_dataset('counts', data=counts_array.reshape(-1))

    def append_experiment_log(self, log_name, log_value_array):
        """ add information about the experiment including scan indexes, sample logs, 2theta and etc
        :param log_name: name of the sample log
        :param log_value_array: array of log values, one entry per sub run
        :raises RuntimeError: if the dataset cannot be created
        """
        # check
        assert self._project_h5 is not None, 'cannot be None'
        assert self._is_writable, 'must be writable'
        checkdatatypes.check_string_variable('Log name', log_name)

        try:
            self._log.debug('Add sample log: {}'.format(log_name))
            self._project_h5[HidraConstants.RAW_DATA][
                HidraConstants.SAMPLE_LOGS].create_dataset(
                    log_name, data=log_value_array)
        except RuntimeError as run_err:
            # chain the original error so the h5py traceback is preserved
            raise RuntimeError('Unable to add log {} due to {}'.format(
                log_name, run_err)) from run_err
        except TypeError as type_err:
            raise RuntimeError(
                'Failed to add log {} with value {} of type {}: {}'
                ''.format(log_name, log_value_array, type(log_value_array),
                          type_err)) from type_err

    def read_default_masks(self):
        """Read default mask, i.e., for pixels at the edges

        Returns
        -------
        numpy.ndarray
            array for mask.  None for no mask

        """
        # a missing default mask is not an error: simply report "no mask"
        try:
            return self.read_mask_detector_array(HidraConstants.DEFAULT_MASK)
        except RuntimeError:
            return None

    def read_user_masks(self, mask_dict):
        """Read user-specified masks

        Parameters
        ----------
        mask_dict : dict
            dictionary to store masks (array)

        Returns
        -------
        None

        """
        # collect all detector mask names; old-format files have no mask entry
        try:
            mask_names = sorted(self._project_h5[HidraConstants.MASK][
                HidraConstants.DETECTOR_MASK].keys())
        except KeyError:
            return

        # the default mask is handled separately via read_default_masks
        if HidraConstants.DEFAULT_MASK in mask_names:
            mask_names.remove(HidraConstants.DEFAULT_MASK)

        for mask_name in mask_names:
            mask_dict[mask_name] = self.read_mask_detector_array(mask_name)

    def read_mask_detector_array(self, mask_name):
        """Get the mask from hidra project file (.h5) in the form of numpy array

        Location
          root
            - mask
                - detector
                     - mask_name

        Parameters
        ----------
        mask_name : str
            name of mask

        Returns
        -------
        numpy.ndarray
            mask array

        Raises
        ------
        RuntimeError
            if the file layout is out of date or the mask does not exist
        """
        try:
            # dataset[()] reads the full array; Dataset.value was removed in h5py 3
            mask_array = self._project_h5[HidraConstants.MASK][
                HidraConstants.DETECTOR_MASK][mask_name][()]
        except KeyError as key_err:
            if HidraConstants.MASK not in self._project_h5.keys():
                err_msg = 'Project file {} does not have "{}" entry.  Its format is not up-to-date.' \
                          ''.format(self._file_name, HidraConstants.MASK)
            elif HidraConstants.DETECTOR_MASK not in self._project_h5[
                    HidraConstants.MASK]:
                err_msg = 'Project file {} does not have "{}" entry under {}. ' \
                          'Its format is not up-to-date.' \
                          ''.format(self._file_name, HidraConstants.DETECTOR_MASK, HidraConstants.MASK)
            else:
                err_msg = 'Detector mask {} does not exist.  Available masks are {}.' \
                          ''.format(mask_name, self._project_h5[HidraConstants.MASK].keys())
            # chain the KeyError so the original lookup failure is preserved
            raise RuntimeError('{}\nFYI: {}'.format(err_msg, key_err)) from key_err

        return mask_array

    def write_mask_detector_array(self, mask_name, mask_array):
        """Write detector mask

        Structure:
          root
            - mask
                - detector
                     - default/universal
                     - mask_name


        Parameters
        ----------
        mask_name : str or None
            mask name.  None for default/universal detector mask
        mask_array : numpy.ndarray
            (N, ), masks, 0 for masking, 1 for ROI

        Returns
        -------
        None

        """
        # None selects the default/universal mask slot
        if mask_name is None:
            mask_name = HidraConstants.DEFAULT_MASK

        detector_mask_entry = self._project_h5[HidraConstants.MASK][
            HidraConstants.DETECTOR_MASK]

        # replace any mask previously stored under this name
        if mask_name in detector_mask_entry:
            del detector_mask_entry[mask_name]

        detector_mask_entry.create_dataset(mask_name, data=mask_array)

    def write_mask_solid_angle(self, mask_name, solid_angle_bin_edges):
        """
        Add mask in the form of solid angle
        Location: ..../main entry/mask/solid angle/
        data will be a range of solid angles and number of patterns to generate.
        example solid angle range = -8, 8, number of pattern = 3

        :param mask_name: name under which the solid-angle mask is stored
        :param solid_angle_bin_edges: numpy 1D array as s0, s1, s2, ...
        """
        solid_angle_entry = self._project_h5[HidraConstants.MASK][
            HidraConstants.SOLID_ANGLE_MASK]

        # overwrite any mask previously stored under this name
        if mask_name in solid_angle_entry:
            del solid_angle_entry[mask_name]

        solid_angle_entry.create_dataset(mask_name, data=solid_angle_bin_edges)

    def read_mask_solid_angle(self, mask_name):
        """Get the masks in the form of solid angle bin edges
        """
        try:
            return self._project_h5[HidraConstants.MASK][
                HidraConstants.SOLID_ANGLE_MASK][mask_name]
        except KeyError as key_err:
            raise RuntimeError(
                'Detector mask {} does not exist.  Available masks are {}. FYI: {}'
                ''.format(
                    mask_name, self._project_h5[HidraConstants.MASK][
                        HidraConstants.SOLID_ANGLE_MASK].keys(), key_err))

    def close(self):
        '''
        Close the file without checking whether the file can be written or not. This can
        be called multiple times without issue.
        '''
        # a second call finds the handle already dropped and does nothing
        if self._project_h5 is None:
            return
        self._project_h5.close()
        self._project_h5 = None  #
        self._log.information('File {} is closed'.format(self._file_name))

    def save(self, verbose=False):
        """
        convert all the information about project to HDF file.
        As the data has been written to h5.File instance already, the only thing left is to close the file
        """
        self._validate_write_operation()

        if verbose:
            # report before close() drops the underlying h5py handle
            self._log.information(
                'Changes are saved to {0}. File is now closed.'.format(
                    self._project_h5.filename))

        self.close()

    def read_diffraction_2theta_array(self):
        """Get the (reduced) diffraction data's 2-theta vector

        Returns
        -------
        numpy.ndarray
            1D vector for unified 2theta vector for all sub runs
            2D array for possibly various 2theta vector for each

        """
        reduced_group = self._project_h5[HidraConstants.REDUCED_DATA]

        # FIXME - patch for 'legacy' files whose entry is named '2Theta';
        # remove once all project files use HidraConstants.TWO_THETA
        tth_key = HidraConstants.TWO_THETA
        if tth_key not in reduced_group:
            tth_key = '2Theta'

        return reduced_group[tth_key].value

    def read_diffraction_intensity_vector(self, mask_id, sub_run):
        """Get the (reduced) diffraction data's intensity.

        :param mask_id: mask name, or None for the default/main reduced data
        :param sub_run: sub run number, or None for all sub runs
        :return: 1D array (single sub run) or 2D array (all sub runs)
        :raises ValueError: if sub_run is given but is not a known sub run
        """
        # None maps to the main (unmasked) reduced-data entry
        if mask_id is None:
            mask_id = HidraConstants.REDUCED_MAIN

        checkdatatypes.check_string_variable(
            'Mask ID', mask_id,
            list(self._project_h5[HidraConstants.REDUCED_DATA].keys()))

        # Get data to return
        if sub_run is None:
            # all the sub runs: full 2D matrix
            reduced_diff_hist = self._project_h5[
                HidraConstants.REDUCED_DATA][mask_id].value
        else:
            # one specific sub run: select its row by index
            # NOTE: a second "if mask_id is None" re-defaulting existed here
            # but was unreachable (mask_id is defaulted above); removed.
            sub_run_list = self.read_sub_runs()
            sub_run_index = sub_run_list.index(sub_run)
            reduced_diff_hist = self._project_h5[
                HidraConstants.REDUCED_DATA][mask_id].value[sub_run_index]

        return reduced_diff_hist

    def read_diffraction_variance_vector(self, mask_id, sub_run):
        """Get the (reduced) diffraction data's variances.

        :param mask_id: mask name, or None for the default/main reduced data;
                        a '_var' suffix is appended automatically if absent
        :param sub_run: sub run number, or None for all sub runs
        :return: 1D array (single sub run), 2D array (all sub runs), or None
                 if no variance entry exists for the mask
        """
        # None maps to the main (unmasked) reduced-data entry
        if mask_id is None:
            mask_id = HidraConstants.REDUCED_MAIN

        # variance entries are stored under '<mask>_var'
        if '_var' not in mask_id:
            mask_id += '_var'

        try:
            checkdatatypes.check_string_variable(
                'Mask ID', mask_id,
                list(self._project_h5[HidraConstants.REDUCED_DATA].keys()))

            # Get data to return
            if sub_run is None:
                # all the sub runs: full 2D matrix
                reduced_variance_hist = self._project_h5[
                    HidraConstants.REDUCED_DATA][mask_id].value
            else:
                # one specific sub run: select its row by index
                # NOTE: the original re-defaulted mask_id and re-appended
                # '_var' here; both branches were unreachable (handled at the
                # top of the method) and have been removed.
                sub_run_list = self.read_sub_runs()
                sub_run_index = sub_run_list.index(sub_run)
                reduced_variance_hist = self._project_h5[
                    HidraConstants.REDUCED_DATA][mask_id].value[sub_run_index]
        except ValueError:
            # unknown mask ID or unknown sub run: no variances available
            reduced_variance_hist = None

        return reduced_variance_hist

    def read_diffraction_masks(self):
        """Return the list of mask names with reduced diffraction data.

        The 2-theta entries stored in the same group are filtered out.
        """
        masks = list(self._project_h5[HidraConstants.REDUCED_DATA].keys())

        # The 2theta vector lives in the same group but is not a mask.
        # FIXME - '2Theta' is a legacy spelling (e.g. Hidra-16_Log.h5);
        # drop it once all files use HidraConstants.TWO_THETA
        for tth_entry in (HidraConstants.TWO_THETA, '2Theta'):
            if tth_entry in masks:
                masks.remove(tth_entry)

        return masks

    def read_instrument_geometry(self):
        """
        Get instrument geometry parameters
        :return: an instance of instrument_geometry.InstrumentSetup
        """
        # Locate the detector-parameters group under geometry setup
        detector_group = self._project_h5[HidraConstants.INSTRUMENT][
            HidraConstants.GEOMETRY_SETUP][HidraConstants.DETECTOR_PARAMS]

        # Read the stored values
        num_rows, num_cols = detector_group['detector size'].value
        pixel_size_x, pixel_size_y = detector_group['pixel dimension'].value
        arm_length = detector_group['L2'].value

        # Build the geometry object from the raw (uncalibrated) values
        return AnglerCameraDetectorGeometry(num_rows=num_rows,
                                            num_columns=num_cols,
                                            pixel_size_x=pixel_size_x,
                                            pixel_size_y=pixel_size_y,
                                            arm_length=arm_length,
                                            calibrated=False)

    def read_sample_logs(self):
        """Get sample logs

        Retrieve all the (sample) logs from Hidra project file.
        Raw information retrieved from rs project file is numpy arrays

        Returns
        -------
        SampleLogs
            container of sample logs keyed by log name, including the
            sub-run numbers under HidraConstants.SUB_RUNS

        Raises
        ------
        RuntimeError
            if the sample-log group has no sub-run entry
        """
        # Get the group
        logs_group = self._project_h5[HidraConstants.RAW_DATA][
            HidraConstants.SAMPLE_LOGS]

        if HidraConstants.SUB_RUNS not in logs_group.keys():
            raise RuntimeError(
                'Failed to find {} in {} group of the file'.format(
                    HidraConstants.SUB_RUNS, HidraConstants.SAMPLE_LOGS))

        # Get 2theta and others
        samplelogs = SampleLogs()
        # first set subruns
        # NOTE(review): sub runs are assigned before the other logs,
        # presumably so SampleLogs can validate log lengths against the
        # sub-run list -- confirm against the SampleLogs implementation.
        samplelogs[HidraConstants.SUB_RUNS] = logs_group[
            HidraConstants.SUB_RUNS].value
        # copy every log; the sub-run entry is re-assigned here as well
        for log_name in logs_group.keys():
            samplelogs[log_name] = logs_group[log_name].value

        return samplelogs

    def read_log_value(self, log_name):
        """Fetch a single sample-log entry by name.

        Parameters
        ----------
        log_name : str
            name of the log within the sample-log group

        Returns
        -------
        HDF5 dataset holding the log (array or single value)
        """
        assert self._project_h5 is not None, 'Project HDF5 is not loaded yet'

        sample_logs = self._project_h5[HidraConstants.RAW_DATA][
            HidraConstants.SAMPLE_LOGS]
        return sample_logs[log_name]

    def read_raw_counts(self, sub_run):
        """Get the raw detector counts of a sub run.

        :param sub_run: sub run number (non-negative integer)
        :return: detector counts array for the sub run
        :raises KeyError: if the sub run does not exist in the file
        """
        # FIX: placeholder assert message 'blabla' replaced with a real one
        assert self._project_h5 is not None, 'Project HDF5 is not loaded yet'
        # FIX: 'sun run' typo corrected in the name reported on check failure
        checkdatatypes.check_int_variable('sub run', sub_run, (0, None))

        # sub runs are stored under zero-padded 4-digit group names
        sub_run_str = '{:04}'.format(sub_run)
        try:
            counts = self._project_h5[HidraConstants.RAW_DATA][
                HidraConstants.SUB_RUNS][sub_run_str]['counts'].value
        except KeyError as key_error:
            err_msg = 'Unable to access sub run {} with key {}: {}\nAvailable runs are: {}' \
                      ''.format(sub_run, sub_run_str, key_error,
                                self._project_h5[HidraConstants.RAW_DATA][HidraConstants.SUB_RUNS].keys())
            raise KeyError(err_msg)

        return counts

    def read_sub_runs(self):
        """Return the sub run numbers stored in the sample-log group.

        :return: list of int sub run numbers (empty if none recorded)
        """
        self._log.debug(str(self._project_h5.keys()))
        self._log.debug(self._file_name)

        logs_group = self._project_h5[HidraConstants.RAW_DATA][
            HidraConstants.SAMPLE_LOGS]
        if HidraConstants.SUB_RUNS in logs_group:
            raw_sub_runs = logs_group[HidraConstants.SUB_RUNS].value
        else:
            raw_sub_runs = []

        self._log.debug('.... Sub runs: {}'.format(raw_sub_runs))

        # stored entries may be strings; normalize each to int
        sub_run_list = [int(entry) for entry in raw_sub_runs]

        self._log.debug('.... Sub runs: {}'.format(sub_run_list))

        return sub_run_list

    def write_instrument_geometry(self, instrument_setup):
        """
        Add instrument geometry and wave length information to project file

        :param instrument_setup: HidraSetup instance providing the instrument
            name, raw geometry (arm length, detector size, pixel dimension)
            and, optionally, a calibrated wave length
        :raises RuntimeError: if the project file is read-only
        """
        # check inputs
        self._validate_write_operation()
        checkdatatypes.check_type('Instrument geometry setup',
                                  instrument_setup, HidraSetup)

        # write value to instrument
        instrument_group = self._project_h5[HidraConstants.INSTRUMENT]

        # write attributes
        instrument_group.attrs['name'] = instrument_setup.name

        # get the entry for raw instrument setup
        # NOTE(review): create_dataset raises if these datasets already
        # exist -- this method appears to assume a freshly created file;
        # confirm against the file-creation code path.
        detector_group = instrument_group['geometry setup']['detector']
        raw_geometry = instrument_setup.get_instrument_geometry(False)
        detector_group.create_dataset('L2',
                                      data=numpy.array(
                                          raw_geometry.arm_length))
        det_size = numpy.array(
            instrument_setup.get_instrument_geometry(False).detector_size)
        detector_group.create_dataset('detector size', data=det_size)
        pixel_dimension = list(
            instrument_setup.get_instrument_geometry(False).pixel_dimension)
        detector_group.create_dataset('pixel dimension',
                                      data=numpy.array(pixel_dimension))

        # wave length
        wavelength_group = instrument_group[HidraConstants.GEOMETRY_SETUP][
            HidraConstants.WAVELENGTH]
        try:
            wl = instrument_setup.get_wavelength(None)
        except (NotImplementedError, RuntimeError) as run_err:
            # No wave length from workspace: do nothing
            self._log.error(str(run_err))
            wl = None

        # Set wave length
        if wl is not None:
            wavelength_group.create_dataset('Calibrated',
                                            data=numpy.array([wl]))

    def read_peak_tags(self):
        """Get all the tags of peaks with parameters stored in HiDRA project

        Returns
        -------
        list
            list of string for all the peak tags

        """
        # Get main group
        peak_main_group = self._project_h5[HidraConstants.PEAKS]

        # FIX: return a plain list as documented, not a live h5py KeysView
        # tied to the open file
        return list(peak_main_group.keys())

    def read_peak_parameters(self, peak_tag):
        """Get the parameters related to a peak

        The parameters including
        (1) peak profile (2) sub runs (3) chi2 (4) parameter names (5) parameter values

        Returns
        -------
        ~pyrs.peaks.peak_collection.PeakCollection
            All of the information from fitting a peak across subruns
        """
        peak_main_group = self._project_h5[HidraConstants.PEAKS]

        # Locate the entry for the requested peak tag
        if peak_tag not in peak_main_group.keys():
            raise RuntimeError('Peak tag {} cannot be found'.format(peak_tag))
        peak_entry = peak_main_group[peak_tag]

        # Attributes (profile/background) and datasets (fit results)
        profile = peak_entry.attrs[HidraConstants.PEAK_PROFILE]
        background = peak_entry.attrs[HidraConstants.BACKGROUND_TYPE]
        sub_run_array = peak_entry[HidraConstants.SUB_RUNS].value
        chi2_array = peak_entry[HidraConstants.PEAK_FIT_CHI2].value
        param_values = peak_entry[HidraConstants.PEAK_PARAMS].value
        error_values = peak_entry[HidraConstants.PEAK_PARAMS_ERROR].value

        # Sanity check: values and errors must align element-wise
        if param_values.shape != error_values.shape:
            raise RuntimeError(
                'Parameters[{}] and Errors[{}] have different shape'.format(
                    param_values.shape, error_values.shape))

        peak_collection = PeakCollection(peak_tag, profile, background)
        peak_collection.set_peak_fitting_values(subruns=sub_run_array,
                                                parameter_values=param_values,
                                                parameter_errors=error_values,
                                                fit_costs=chi2_array)

        # Strain support: restore the reference peak center (dSpacing)
        # if it was ever written to this project
        if HidraConstants.D_REFERENCE in list(peak_entry.keys()):
            peak_collection.set_d_reference(
                peak_entry[HidraConstants.D_REFERENCE].value)

        return peak_collection

    def write_peak_parameters(self, fitted_peaks):
        """Set the peak fitting results to project file.

         The tree structure for fitted peak in all sub runs is defined as
        - peaks
            - [peak-tag]
                - attr/'peak profile'
                - sub runs
                - parameter values
                - parameter fitting error

        If an entry with the same peak tag already exists, its datasets are
        replaced.  (Previously, re-writing an existing tag raised because
        h5py's create_dataset cannot overwrite an existing dataset.)

        Parameters
        ----------
        fitted_peaks : pyrs.core.peak_collection.PeakCollection

        Returns
        -------

        """
        # Check inputs and file status
        self._validate_write_operation()

        # Get value from peak collection
        peak_tag = fitted_peaks.peak_tag
        peak_profile = str(fitted_peaks.peak_profile)
        background_type = str(fitted_peaks.background_type)

        checkdatatypes.check_string_variable('Peak tag', peak_tag)
        checkdatatypes.check_string_variable('Peak profile', peak_profile)
        checkdatatypes.check_string_variable('Background type',
                                             background_type)

        # access or create node for peak with given tag
        peak_main_group = self._project_h5[HidraConstants.PEAKS]

        if peak_tag not in peak_main_group:
            # create peak-tag entry if it does not exist
            single_peak_entry = peak_main_group.create_group(peak_tag)
        else:
            # if peak-tag entry, get the reference to the entry
            single_peak_entry = peak_main_group[peak_tag]

        # Attributes
        self.set_attributes(single_peak_entry, HidraConstants.PEAK_PROFILE,
                            peak_profile)
        self.set_attributes(single_peak_entry, HidraConstants.BACKGROUND_TYPE,
                            background_type)

        def _replace_dataset(name, data):
            # h5py cannot overwrite a dataset via create_dataset;
            # delete any existing entry first (same pattern as
            # write_wavelength)
            if name in single_peak_entry:
                del single_peak_entry[name]
            single_peak_entry.create_dataset(name, data=data)

        _replace_dataset(HidraConstants.SUB_RUNS, fitted_peaks.sub_runs)
        _replace_dataset(HidraConstants.PEAK_FIT_CHI2,
                         fitted_peaks.fitting_costs)
        peak_values, peak_errors = fitted_peaks.get_native_params()
        _replace_dataset(HidraConstants.PEAK_PARAMS, peak_values)
        _replace_dataset(HidraConstants.PEAK_PARAMS_ERROR, peak_errors)

        # Reference peak center in dSpacing: (strain)
        ref_d_array = fitted_peaks.get_d_reference()
        if isinstance(ref_d_array, numpy.ndarray):
            # if reference peak position in D is set
            _replace_dataset(HidraConstants.D_REFERENCE, ref_d_array)
        elif not numpy.isnan(ref_d_array):
            # single non-NaN value: broadcast to one value per sub run
            num_subruns = len(fitted_peaks.sub_runs)
            _replace_dataset(HidraConstants.D_REFERENCE,
                             numpy.array([ref_d_array] * num_subruns))

    def read_wavelengths(self):
        """Get calibrated wave length

        Returns
        -------
        Float
            Calibrated wave length.  NaN if a wave length was never set
        """
        # Default when no wave length was ever recorded
        wl = numpy.nan

        try:
            mono_node = self._project_h5[HidraConstants.INSTRUMENT][
                HidraConstants.MONO]
        except KeyError:
            # monochromator node does not exist
            self._log.error('Node {} does not exist in HiDRA project file {}'
                            ''.format(HidraConstants.MONO, self._file_name))
            return wl

        if HidraConstants.WAVELENGTH in mono_node:
            wl = mono_node[HidraConstants.WAVELENGTH].value
            if wl.shape[0] == 0:
                # FIX: the original left wl bound to the empty array here
                # (despite a "keep as nan" comment); reset to NaN so callers
                # get the documented sentinel
                wl = numpy.nan
            elif wl.shape[0] == 1:
                # exactly one calibrated wave length
                wl = wl[0]
            else:
                # more than one stored value is not supported
                raise RuntimeError(
                    'There are more than 1 wave length registered')

        return wl

    def write_wavelength(self, wave_length):
        """ Set the calibrated wave length
        Location:
          .../instrument/monochromator setting/ ... .../
        Note:
        - same wave length to all sub runs
        - only calibrated wave length in project file
        - raw wave length comes from a table with setting
        :param wave_length: wave length in A
        :return: None
        """
        checkdatatypes.check_float_variable('Wave length', wave_length,
                                            (0, 1000))

        instrument_group = self._project_h5[HidraConstants.INSTRUMENT]

        # Create the 'monochromator setting' node on first write
        if HidraConstants.MONO not in list(instrument_group.keys()):
            instrument_group.create_group(HidraConstants.MONO)

        wl_entry = instrument_group[HidraConstants.MONO]

        # Replace any previously written wave length (h5py cannot
        # overwrite an existing dataset via create_dataset)
        if HidraConstants.WAVELENGTH in list(wl_entry.keys()):
            del wl_entry[HidraConstants.WAVELENGTH]
        wl_entry.create_dataset(HidraConstants.WAVELENGTH,
                                data=numpy.array([wave_length]))

    def read_efficiency_correction(self):
        """
        Get detector efficiency correction measured from vanadium
        Returns
        -------
        numpy ndarray
            Efficiency array
        """
        # The efficiency entry records, as an attribute, the run number
        # the calibration came from; the array itself is stored under
        # that run number
        det_eff_group = self._project_h5[HidraConstants.INSTRUMENT][
            HidraConstants.DETECTOR_EFF]
        calib_run_number = det_eff_group.attrs[HidraConstants.RUN]

        return det_eff_group['{}'.format(calib_run_number)]

    def write_efficiency_correction(self, calib_run_number, eff_array):
        """ Set detector efficiency correction measured from vanadium (efficiency correction)
        Location: ... /main entry/calibration/efficiency:
        Data: numpy array with 1024**2...
        Attribute: add the run number created from to the attribute
        Parameters
        ----------
        calib_run_number : integer
            Run number where the efficiency calibration comes from
        eff_array : numpy ndarray (1D)
            Detector (pixel) efficiency
        """
        det_eff_group = self._project_h5[HidraConstants.INSTRUMENT][
            HidraConstants.DETECTOR_EFF]

        # Record the originating run number as an attribute ...
        det_eff_group.attrs[HidraConstants.RUN] = calib_run_number

        # ... and store the efficiency array under that run number
        det_eff_group.create_dataset('{}'.format(calib_run_number),
                                     data=eff_array)

    def write_information(self, info_dict):
        """Store general project information as root-level HDF5 attributes.

        :param info_dict: dict of attribute name -> value
        :raises RuntimeError: if the file is read-only
        """
        # check and validate
        checkdatatypes.check_dict('Project file general information',
                                  info_dict)
        self._validate_write_operation()

        for info_name, info_value in info_dict.items():
            self._project_h5.attrs[info_name] = info_value

    def write_reduced_diffraction_data_set(self, two_theta_array,
                                           diff_data_set, var_data_set):
        """Set the reduced diffraction data (set)

        Parameters
        ----------
        two_theta_array : numpy.ndarray
            2D array for 2-theta vector, which could be various to each other among sub runs
        diff_data_set : dict
            dictionary of 2D arrays for reduced diffraction patterns' intensities
        var_data_set : dict or None
            dictionary of 2D arrays for reduced diffraction patterns' variances.
            If None, variances default to sqrt(intensity) for each mask.

        Raises
        ------
        RuntimeError
            if any intensity/variance matrix's shape differs from the
            2-theta array's shape
        """
        # Check input
        checkdatatypes.check_numpy_arrays('Two theta vector',
                                          [two_theta_array], 2, False)
        checkdatatypes.check_dict('Diffraction data set', diff_data_set)

        # Retrieve diffraction group
        diff_group = self._project_h5[HidraConstants.REDUCED_DATA]

        # Add (or overwrite) the 2theta vector
        self._write_reduced_dataset(diff_group, HidraConstants.TWO_THETA,
                                    two_theta_array)

        # Add diffraction (intensity) data, one entry per mask
        for mask_id, diff_data_matrix_i in diff_data_set.items():
            self._log.information('Mask {} data set shape: {}'.format(
                mask_id, diff_data_matrix_i.shape))
            checkdatatypes.check_numpy_arrays('Diffraction data (matrix)',
                                              [diff_data_matrix_i], None,
                                              False)
            if two_theta_array.shape != diff_data_matrix_i.shape:
                raise RuntimeError(
                    'Length of 2theta vector ({}) is different from intensities ({})'
                    ''.format(two_theta_array.shape, diff_data_matrix_i.shape))
            # None denotes the default/main (unmasked) reduced data
            if mask_id is None:
                data_name = HidraConstants.REDUCED_MAIN
            else:
                data_name = mask_id
            self._write_reduced_dataset(diff_group, data_name,
                                        diff_data_matrix_i)

        # Default variances to sqrt(intensity) when not provided.
        # FIX: build a NEW dict -- the original code aliased diff_data_set
        # and square-rooted its values in place, silently clobbering the
        # caller's intensity data.
        if var_data_set is None:
            var_data_set = {mask_id: numpy.sqrt(matrix)
                            for mask_id, matrix in diff_data_set.items()}

        # Add variance data, one entry per mask, with a '_var' suffix
        for mask_id, var_data_matrix_i in var_data_set.items():
            self._log.information('Mask {} data set shape: {}'.format(
                mask_id, var_data_matrix_i.shape))
            checkdatatypes.check_numpy_arrays('Diffraction data (matrix)',
                                              [var_data_matrix_i], None, False)
            if two_theta_array.shape != var_data_matrix_i.shape:
                raise RuntimeError(
                    'Length of 2theta vector ({}) is different from intensities ({})'
                    ''.format(two_theta_array.shape, var_data_matrix_i.shape))
            if mask_id is None:
                data_name = HidraConstants.REDUCED_MAIN + '_var'
            else:
                data_name = mask_id + '_var'
            self._write_reduced_dataset(diff_group, data_name,
                                        var_data_matrix_i)

    @staticmethod
    def _write_reduced_dataset(diff_group, data_name, data_matrix):
        """Write (or overwrite) one dataset in the reduced-data group.

        In-place assignment is tried first; if the stored shape differs
        (h5py raises TypeError), the dataset is deleted and re-created.
        """
        if data_name in diff_group.keys():
            try:
                diff_group[data_name][...] = data_matrix
            except TypeError:
                # usually the stored array size changed
                del diff_group[data_name]
                diff_group.create_dataset(data_name, data=data_matrix)
        else:
            diff_group.create_dataset(data_name, data=data_matrix)

    def write_sub_runs(self, sub_runs):
        """Write the sub run numbers into the sample-log entry.

        :param sub_runs: list or 1D numpy array of sub run numbers
        """
        # Accept a plain list; anything else must be a 1D numpy array
        if isinstance(sub_runs, list):
            sub_runs = numpy.array(sub_runs)
        else:
            checkdatatypes.check_numpy_arrays('Sub run numbers', [sub_runs], 1,
                                              False)

        self._project_h5[HidraConstants.RAW_DATA][
            HidraConstants.SAMPLE_LOGS].create_dataset(
                HidraConstants.SUB_RUNS, data=sub_runs)

    def _create_diffraction_node(self, sub_run_number):
        """ Create a node to record diffraction data
        It will check if such node already exists
        :param sub_run_number: sub run number used to name the node
        :return: the diffraction group for the sub run
        :exception: RuntimeError is raised if such 'sub run' node exists but not correct
        """
        # sub runs are stored under zero-padded 4-digit group names
        sub_run_group_name = '{0:04}'.format(sub_run_number)

        self._log.debug(
            'sub group entry name in hdf: {}'.format(sub_run_group_name))

        # check existing node or create a new node
        self._log.debug(
            'Diffraction node sub group/entries: {}'
            ''.format(self._project_h5[HidraConstants.REDUCED_DATA].keys()))
        if sub_run_group_name in self._project_h5[HidraConstants.REDUCED_DATA]:
            # sub-run node exists: verify it carries both unit groups
            # FIX: self._log(...) called the logger object itself (a
            # TypeError at runtime); use the debug() method
            self._log.debug('sub-group: {}'.format(sub_run_group_name))
            diff_group = self._project_h5[
                HidraConstants.REDUCED_DATA][sub_run_group_name]
            if not (DiffractionUnit.TwoTheta in diff_group
                    and DiffractionUnit.DSpacing in diff_group):
                raise RuntimeError(
                    'Diffraction node for sub run {} exists but is not complete'
                    .format(sub_run_number))
        else:
            # create new node: parent, child-2theta, child-dspacing
            diff_group = self._project_h5[
                HidraConstants.REDUCED_DATA].create_group(sub_run_group_name)
            diff_group.create_group(str(DiffractionUnit.TwoTheta))
            diff_group.create_group(str(DiffractionUnit.DSpacing))

        return diff_group

    def _validate_write_operation(self):
        """Raise unless this project file was opened writable.

        :raises RuntimeError: if the file is read-only
        """
        if self._is_writable:
            return
        raise RuntimeError(
            'Project file {} is set to read-only by user'.format(
                self._project_h5.name))

    @staticmethod
    def set_attributes(h5_group, attribute_name, attribute_value):
        """Attach a single attribute to an HDF5 group.

        :param h5_group: group to receive the attribute
        :param attribute_name: attribute key; must be a string
        :param attribute_value: value stored under the key
        """
        # Only the name is type-checked; the value may be any
        # h5py-storable type
        checkdatatypes.check_string_variable('Attribute name', attribute_name)
        h5_group.attrs[attribute_name] = attribute_value
# Example #19
class SliceViewer(ObservingPresenter, SliceViewerBasePresenter):
    """Presenter coordinating the sliceviewer model (slicing/cutting operations)
    and view (display widgets) for a single workspace, reacting to ADS events."""
    # Duration (ms) for which a temporary message is shown in the view's status area
    TEMPORARY_STATUS_TIMEOUT = 2000

    def __init__(self, ws, parent=None, window_flags=Qt.Window, model=None, view=None, conf=None):
        """
        Create a presenter for controlling the slice display for a workspace
        :param ws: Workspace containing data to display and slice
        :param parent: An optional parent widget
        :param window_flags: An optional set of window flags
        :param model: A model to define slicing operations. If None uses SliceViewerModel
        :param view: A view to display the operations. If None uses SliceViewerView
        :param conf: An optional configuration object forwarded to the view
        """
        model: SliceViewerModel = model if model else SliceViewerModel(ws)
        self.view = view if view else SliceViewerView(self,
                                                      Dimensions.get_dimensions_info(ws),
                                                      model.can_normalize_workspace(), parent,
                                                      window_flags, conf)
        super().__init__(ws, self.view.data_view, model)

        self._logger = Logger("SliceViewer")
        self._peaks_presenter: PeaksViewerCollectionPresenter = None
        self._cutviewer_presenter = None
        self.conf = conf

        # Acts as a 'time capsule' to the properties of the model at this
        # point in the execution. By the time the ADS observer calls self.replace_workspace,
        # the workspace associated with self.model has already been changed.
        self.initial_model_properties = model.get_properties()
        self._new_plot_method, self.update_plot_data = self._decide_plot_update_methods()

        self.view.setWindowTitle(self.model.get_title())
        self.view.data_view.create_axes_orthogonal(
            redraw_on_zoom=not WorkspaceInfo.can_support_dynamic_rebinning(self.model.ws))

        if self.model.can_normalize_workspace():
            self.view.data_view.set_normalization(ws)
            self.view.data_view.norm_opts.currentTextChanged.connect(self.normalization_changed)
        if not self.model.can_support_peaks_overlays():
            self.view.data_view.disable_tool_button(ToolItemText.OVERLAY_PEAKS)
        # check whether to enable non-orthog view
        # don't know whether can always assume init with display indices (0,1) - so get sliceinfo
        sliceinfo = self.get_sliceinfo()
        if not sliceinfo.can_support_nonorthogonal_axes():
            self.view.data_view.disable_tool_button(ToolItemText.NONORTHOGONAL_AXES)
        if not self.model.can_support_non_axis_cuts():
            self.view.data_view.disable_tool_button(ToolItemText.NONAXISALIGNEDCUTS)

        self.view.data_view.help_button.clicked.connect(self.action_open_help_window)

        self.refresh_view()

        # Start the GUI with zoom selected.
        self.view.data_view.activate_tool(ToolItemText.ZOOM)

        self.ads_observer = SliceViewerADSObserver(self.replace_workspace, self.rename_workspace,
                                                   self.ADS_cleared, self.delete_workspace)

        # simulate clicking on the home button, which will force all signal and slot connections
        # properly set.
        # NOTE: Some part of the connections are not set in the correct, resulting in a strange behavior
        #       where the colorbar and view is not updated with switch between different scales.
        #       This is a ducktape fix and should be revisited once we have a better way to do this.
        # NOTE: This workaround solve the problem, but it leads to a failure in
        #       projectroot.qt.python.mantidqt_qt5.test_sliceviewer_presenter.test_sliceviewer_presenter
        #       Given that this issue is not of high priority, we are leaving it as is for now.
        # self.show_all_data_clicked()

    def new_plot(self, *args, **kwargs):
        """Delegate to the plot method chosen for the current workspace type."""
        self._new_plot_method(*args, **kwargs)

    def new_plot_MDH(self, dimensions_transposing=False, dimensions_changing=False):
        """
        Tell the view to display a new plot of an MDHistoWorkspace
        """
        data_view = self.view.data_view
        limits = data_view.get_axes_limits()

        if limits is None or not WorkspaceInfo.can_support_dynamic_rebinning(self.model.ws):
            data_view.plot_MDH(self.model.get_ws(), slicepoint=self.get_slicepoint())
            self._call_peaks_presenter_if_created("notify", PeaksViewerPresenter.Event.OverlayPeaks)
        else:
            # dynamic rebinning supported: treat as an MDEventWorkspace plot instead
            self.new_plot_MDE(dimensions_transposing, dimensions_changing)

    def new_plot_MDE(self, dimensions_transposing=False, dimensions_changing=False):
        """
        Tell the view to display a new plot of an MDEventWorkspace
        """
        data_view = self.view.data_view
        limits = data_view.get_axes_limits()

        # The value at the i'th index of this tells us that the axis with that value (0 or 1) will display dimension i
        dimension_indices = self.view.dimensions.get_states()

        if dimensions_transposing:
            # Since the dimensions are transposing, the limits we have from the view are the wrong way around
            # with respect to the axes the dimensions are about to be displayed, so get the previous dimension states.
            dimension_indices = self.view.dimensions.get_previous_states()
        elif dimensions_changing:
            # If we are changing which dimensions are to be displayed, the limits we got from the view are stale
            # as they refer to the previous two dimensions that were displayed.
            limits = None

        data_view.plot_MDH(
            self.model.get_ws_MDE(slicepoint=self.get_slicepoint(),
                                  bin_params=data_view.dimensions.get_bin_params(),
                                  limits=limits,
                                  dimension_indices=dimension_indices))
        self._call_peaks_presenter_if_created("notify", PeaksViewerPresenter.Event.OverlayPeaks)

    def update_plot_data_MDH(self):
        """
        Update the view to display an updated MDHistoWorkspace slice/cut
        """
        self.view.data_view.update_plot_data(
            self.model.get_data(self.get_slicepoint(),
                                transpose=self.view.data_view.dimensions.transpose))

    def update_plot_data_MDE(self):
        """
        Update the view to display an updated MDEventWorkspace slice/cut
        """
        data_view = self.view.data_view
        data_view.update_plot_data(
            self.model.get_data(self.get_slicepoint(),
                                bin_params=data_view.dimensions.get_bin_params(),
                                dimension_indices=data_view.dimensions.get_states(),
                                limits=data_view.get_axes_limits(),
                                transpose=self.view.data_view.dimensions.transpose))

    def update_plot_data_matrix(self):
        # should never be called, since this workspace type is only 2D the plot dimensions never change
        pass

    def get_frame(self) -> SpecialCoordinateSystem:
        """Returns frame of workspace - require access for adding a peak in peaksviewer"""
        return self.model.get_frame()

    def get_sliceinfo(self, force_nonortho_mode: bool = False):
        """
        :param force_nonortho_mode: if True then don't use orthogonal angles even if non_ortho mode == False - this
            is necessary because when non-ortho view is toggled the data_view is not updated at the point a new
            SliceInfo is created
        :return: a SliceInfo object describing the current slice and transform (which by default will be orthogonal
                 if non-ortho mode is False)
        """
        dimensions = self.view.data_view.dimensions
        non_ortho_mode = True if force_nonortho_mode else self.view.data_view.nonorthogonal_mode
        axes_angles = self.model.get_axes_angles(force_orthogonal=not non_ortho_mode)  # None if can't support transform
        return SliceInfo(point=dimensions.get_slicepoint(),
                         transpose=dimensions.transpose,
                         range=dimensions.get_slicerange(),
                         qflags=dimensions.qflags,
                         axes_angles=axes_angles)

    def get_proj_matrix(self):
        """Return the projection matrix from the model."""
        return self.model.get_proj_matrix()

    def get_axes_limits(self):
        """Return the current axes limits from the data view."""
        return self.view.data_view.get_axes_limits()

    def dimensions_changed(self):
        """Indicates that the dimensions have changed"""
        data_view = self._data_view
        sliceinfo = self.get_sliceinfo()
        if data_view.nonorthogonal_mode:
            if sliceinfo.can_support_nonorthogonal_axes():
                # axes need to be recreated to have the correct transform associated
                data_view.create_axes_nonorthogonal(sliceinfo.get_northogonal_transform())
            else:
                data_view.disable_tool_button(ToolItemText.NONORTHOGONAL_AXES)
                data_view.create_axes_orthogonal()
        else:
            if sliceinfo.can_support_nonorthogonal_axes():
                data_view.enable_tool_button(ToolItemText.NONORTHOGONAL_AXES)
            else:
                data_view.disable_tool_button(ToolItemText.NONORTHOGONAL_AXES)

        ws_type = WorkspaceInfo.get_ws_type(self.model.ws)
        if ws_type == WS_TYPE.MDH or ws_type == WS_TYPE.MDE:
            if self.model.get_number_dimensions() > 2 and \
                    sliceinfo.slicepoint[data_view.dimensions.get_previous_states().index(None)] is None:
                # The dimension of the slicepoint has changed
                self.new_plot(dimensions_changing=True)
            else:
                self.new_plot(dimensions_transposing=True)
        else:
            self.new_plot()
        self._call_cutviewer_presenter_if_created("on_dimension_changed")

    def slicepoint_changed(self):
        """Indicates the slicepoint has been updated"""
        self._call_peaks_presenter_if_created("notify",
                                              PeaksViewerPresenter.Event.SlicePointChanged)
        self._call_cutviewer_presenter_if_created("on_slicepoint_changed")
        self.update_plot_data()

    def export_roi(self, limits):
        """Notify that an roi has been selected for export to a workspace
        :param limits: 2-tuple of ((left, right), (bottom, top)). These are in display order
        """
        data_view = self.view.data_view

        try:
            self._show_status_message(
                self.model.export_roi_to_workspace(self.get_slicepoint(),
                                                   bin_params=data_view.dimensions.get_bin_params(),
                                                   limits=limits,
                                                   transpose=data_view.dimensions.transpose,
                                                   dimension_indices=data_view.dimensions.get_states()))
        except Exception as exc:
            self._logger.error(str(exc))
            self._show_status_message("Error exporting ROI")

    def export_cut(self, limits, cut_type):
        """Notify that a cut has been selected for export to a workspace
        :param limits: 2-tuple of ((left, right), (bottom, top)). These are in display order
        and could be transposed w.r.t to the data
        :param cut_type: A string indicating the required cut type
        """
        data_view = self.view.data_view

        try:
            self._show_status_message(
                self.model.export_cuts_to_workspace(
                    self.get_slicepoint(),
                    bin_params=data_view.dimensions.get_bin_params(),
                    limits=limits,
                    transpose=data_view.dimensions.transpose,
                    dimension_indices=data_view.dimensions.get_states(),
                    cut=cut_type))
        except Exception as exc:
            self._logger.error(str(exc))
            self._show_status_message("Error exporting roi cut")

    def export_pixel_cut(self, pos, axis):
        """Notify a single pixel line plot has been requested from the
        given position in data coordinates.
        :param pos: Position on the image
        :param axis: String indicating the axis the position relates to: 'x' or 'y'
        """
        data_view = self.view.data_view

        try:
            self._show_status_message(
                self.model.export_pixel_cut_to_workspace(
                    self.get_slicepoint(),
                    bin_params=data_view.dimensions.get_bin_params(),
                    pos=pos,
                    transpose=data_view.dimensions.transpose,
                    axis=axis))
        except Exception as exc:
            self._logger.error(str(exc))
            self._show_status_message("Error exporting single-pixel cut")

    def perform_non_axis_aligned_cut(self, vectors, extents, nbins):
        """Ask the model to perform a non-axis-aligned cut and notify the cut viewer.
        :param vectors: basis vectors defining the cut
        :param extents: extents along each basis vector
        :param nbins: number of bins along each basis vector
        """
        try:
            wscut_name = self.model.perform_non_axis_aligned_cut_to_workspace(vectors, extents, nbins)
            self._call_cutviewer_presenter_if_created('on_cut_done', wscut_name)
        except Exception as exc:
            self._logger.error(str(exc))
            # was "Error exporting single-pixel cut" - copy-paste from export_pixel_cut
            self._show_status_message("Error performing non-axis aligned cut")

    def nonorthogonal_axes(self, state: bool):
        """
        Toggle non-orthogonal axes on current view
        :param state: If true a request is being made to turn them on, else they should be turned off
        """
        data_view = self.view.data_view
        if state:
            data_view.deactivate_and_disable_tool(ToolItemText.REGIONSELECTION)
            data_view.disable_tool_button(ToolItemText.NONAXISALIGNEDCUTS)
            data_view.disable_tool_button(ToolItemText.LINEPLOTS)
            # set transform from sliceinfo but ignore view as non-ortho state not set yet
            data_view.create_axes_nonorthogonal(self.get_sliceinfo(force_nonortho_mode=True).get_northogonal_transform())
            self.show_all_data_clicked()
        else:
            data_view.create_axes_orthogonal()
            data_view.enable_tool_button(ToolItemText.LINEPLOTS)
            data_view.enable_tool_button(ToolItemText.REGIONSELECTION)
            data_view.enable_tool_button(ToolItemText.NONAXISALIGNEDCUTS)

        self.new_plot()

    def normalization_changed(self, norm_type):
        """
        Notify the presenter that the type of normalization has changed.
        :param norm_type: "By bin width" = volume normalization else no normalization
        """
        self.normalization = norm_type == "By bin width"
        self.new_plot()

    def overlay_peaks_workspaces(self):
        """
        Request activation of peak overlay tools.
          - Asks user to select peaks workspace(s), taking into account any current selection
          - Attaches peaks table viewer/tools if new workspaces requested. Removes any unselected
          - Displays peaks on data display (if any left to display)
        """
        names_overlayed = self._overlayed_peaks_workspaces()
        names_to_overlay = self.view.query_peaks_to_overlay(names_overlayed)
        if names_to_overlay is None:
            # cancelled
            return
        if names_to_overlay or names_overlayed:
            self._create_peaks_presenter_if_necessary().overlay_peaksworkspaces(names_to_overlay)
        else:
            self.view.peaks_view.hide()

    def non_axis_aligned_cut(self, state):
        """Toggle the interactive non-axis-aligned cutting tool.
        :param state: True to activate the tool, False to deactivate it
        """
        data_view = self._data_view
        if state:
            if self._cutviewer_presenter is None:
                self._cutviewer_presenter = CutViewerPresenter(self, data_view.canvas)
                self.view.add_widget_to_splitter(self._cutviewer_presenter.get_view())
            self._cutviewer_presenter.show_view()
            data_view.deactivate_tool(ToolItemText.ZOOM)
            for tool in [ToolItemText.REGIONSELECTION, ToolItemText.LINEPLOTS, ToolItemText.NONORTHOGONAL_AXES]:
                data_view.deactivate_and_disable_tool(tool)
            # turn off cursor tracking as this causes plot to resize interfering with interactive cutting tool
            data_view.track_cursor.setChecked(False)  # on_track_cursor_state_change(False)
        else:
            self._cutviewer_presenter.hide_view()
            for tool in [ToolItemText.REGIONSELECTION, ToolItemText.LINEPLOTS]:
                data_view.enable_tool_button(tool)
            if self.get_sliceinfo().can_support_nonorthogonal_axes():
                data_view.enable_tool_button(ToolItemText.NONORTHOGONAL_AXES)

    def replace_workspace(self, workspace_name, workspace):
        """
        Called when the SliceViewerADSObserver has detected that a workspace has changed
        @param workspace_name: the name of the workspace that has changed
        @param workspace: the workspace that has changed
        """
        if not self.model.workspace_equals(workspace_name):
            # TODO this is a dead branch, since the ADS observer will call this if the
            # names are the same, but the model "workspace_equals" simply checks for the same name
            return
        try:
            candidate_model = SliceViewerModel(workspace)
            candidate_model_properties = candidate_model.get_properties()
            # avoid shadowing the builtin 'property'
            for (prop_name, value) in candidate_model_properties.items():
                if self.initial_model_properties[prop_name] != value:
                    raise ValueError(f"The property {prop_name} is different on the new workspace.")

            # New model is OK, proceed with updating Slice Viewer
            self.model = candidate_model
            # Update the delegate target rather than rebinding self.new_plot, keeping the
            # new_plot() wrapper consistent with how __init__ wires the plot methods
            self._new_plot_method, self.update_plot_data = self._decide_plot_update_methods()
            self.refresh_view()
        except ValueError as err:
            self._close_view_with_message(
                f"Closing Sliceviewer as the underlying workspace was changed: {str(err)}")
            return

    def refresh_view(self):
        """
        Updates the view to enable/disable certain options depending on the model.
        """
        if not self.view:
            return

        # we don't want to use model.get_ws for the image info widget as this needs
        # extra arguments depending on workspace type.
        ws = self.model.ws
        ws.readLock()
        try:
            self.view.data_view.image_info_widget.setWorkspace(ws)
            self.new_plot()
        finally:
            # always release the read lock, even if plotting raised
            ws.unlock()

    def rename_workspace(self, old_name, new_name):
        """Update the model and view title when the underlying workspace is renamed."""
        if self.model.workspace_equals(old_name):
            self.model.set_ws_name(new_name)
            self.view.emit_rename(self.model.get_title(new_name))

    def delete_workspace(self, ws_name):
        """Close the view if the workspace being displayed has been deleted."""
        if self.model.workspace_equals(ws_name):
            self.view.emit_close()

    def ADS_cleared(self):
        """Close the view when the Analysis Data Service is cleared."""
        if self.view:
            self.view.emit_close()

    def clear_observer(self):
        """Called by ObservingView on close event"""
        self.ads_observer = None
        if self._peaks_presenter is not None:
            self._peaks_presenter.clear_observer()

    def canvas_clicked(self, event):
        """Forward a click on the canvas to the peaks presenter (add/delete peak)."""
        if self._peaks_presenter is not None:
            if event.inaxes:
                sliceinfo = self.get_sliceinfo()
                self._logger.debug(f"Coordinates selected x={event.xdata} y={event.ydata} z={sliceinfo.z_value}")
                pos = sliceinfo.inverse_transform([event.xdata, event.ydata, sliceinfo.z_value])
                self._logger.debug(f"Coordinates transformed into {self.get_frame()} frame, pos={pos}")
                self._peaks_presenter.add_delete_peak(pos)
                self.view.data_view.canvas.draw_idle()

    def deactivate_zoom_pan(self):
        """Deactivate the zoom/pan tool on the data view."""
        self.view.data_view.deactivate_zoom_pan()

    def zoom_pan_clicked(self, active):
        """Disable peak add/delete while zoom/pan is active."""
        if active and self._peaks_presenter is not None:
            self._peaks_presenter.deactivate_peak_add_delete()

    # private api
    def _create_peaks_presenter_if_necessary(self):
        """Lazily create and return the peaks viewer collection presenter."""
        if self._peaks_presenter is None:
            self._peaks_presenter = PeaksViewerCollectionPresenter(self.view.peaks_view)
        return self._peaks_presenter

    def _call_peaks_presenter_if_created(self, attr, *args, **kwargs):
        """
        Call a method on the peaks presenter if it has been created
        :param attr: The attribute to call
        :param *args: Positional-arguments to pass to call
        :param **kwargs Keyword-arguments to pass to call
        """
        if self._peaks_presenter is not None:
            getattr(self._peaks_presenter, attr)(*args, **kwargs)

    def _call_cutviewer_presenter_if_created(self, attr, *args, **kwargs):
        """
        Call a method on the cut viewer presenter if it has been created
        :param attr: The attribute to call
        :param *args: Positional-arguments to pass to call
        :param **kwargs Keyword-arguments to pass to call
        """
        if self._cutviewer_presenter is not None:
            getattr(self._cutviewer_presenter, attr)(*args, **kwargs)

    def _show_status_message(self, message: str):
        """
        Show a temporary message in the status of the view
        """
        self.view.data_view.show_temporary_status_message(message, self.TEMPORARY_STATUS_TIMEOUT)

    def _overlayed_peaks_workspaces(self):
        """
        :return: A list of names of the current PeaksWorkspaces overlayed
        """
        current_workspaces = []
        if self._peaks_presenter is not None:
            current_workspaces = self._peaks_presenter.workspace_names()

        return current_workspaces

    def _decide_plot_update_methods(self) -> Tuple[Callable, Callable]:
        """
        Checks the type of workspace in self.model and decides which of the
        new_plot and update_plot_data methods to use
        :return: a 2-tuple of (new_plot method, update_plot_data method) to use
        """
        # TODO get rid of private access here
        ws_type = WorkspaceInfo.get_ws_type(self.model.ws)
        if ws_type == WS_TYPE.MDH:
            return self.new_plot_MDH, self.update_plot_data_MDH
        elif ws_type == WS_TYPE.MDE:
            return self.new_plot_MDE, self.update_plot_data_MDE
        else:
            return self.new_plot_matrix, self.update_plot_data_matrix

    def _close_view_with_message(self, message: str):
        """Close the view and log the reason as a warning."""
        self.view.emit_close()  # inherited from ObservingView
        self._logger.warning(message)

    def notify_close(self):
        """Drop the view reference once it has closed."""
        self.view = None

    def action_open_help_window(self):
        """Open the sliceviewer help page."""
        InterfaceManager().showHelpPage('qthelp://org.mantidproject/doc/workbench/sliceviewer.html')
示例#20
0
class ErrorReporterPresenter(object):
    """Presenter driving the error-report dialog: collects any saved stacktrace,
    sends the report at the user's chosen sharing level, and handles exit."""
    SENDING_ERROR_MESSAGE = 'There was an error when sending the report.\nPlease contact [email protected] directly'

    def __init__(self,
                 view,
                 exit_code,
                 application='mantidplot',
                 traceback=''):
        """
        :param view: the error-report dialog managed by this presenter
        :param exit_code: exit code of the failing application
        :param application: name of the failing application
        :param traceback: traceback text; if empty, a saved stacktrace file is loaded
        """
        self.error_log = Logger("error")
        self._view = view
        self._exit_code = exit_code
        self._application = application
        self._traceback = traceback
        self._view.set_report_callback(self.error_handler)

        if not traceback:
            # No traceback supplied - look for a stacktrace file written by the
            # crashed application and, if found, read it and mark it as sent so
            # the same crash is not reported twice.
            app_data_dir = ConfigService.getAppDataDirectory()
            stacktrace_path = os.path.join(app_data_dir,
                                           '{}_stacktrace.txt'.format(application))
            try:
                if os.path.isfile(stacktrace_path):
                    with open(stacktrace_path, 'r') as stacktrace_file:
                        self._traceback = stacktrace_file.readlines()
                    sent_path = os.path.join(app_data_dir,
                                             '{}_stacktrace_sent.txt'.format(application))
                    os.rename(stacktrace_path, sent_path)
            except OSError:
                # best-effort: a missing/unreadable stacktrace file is not fatal
                pass

    def do_not_share(self, continue_working=True):
        """Log that nothing is shared and handle exit; returns -1."""
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        return -1

    def share_non_identifiable_information(self, continue_working):
        """Send an anonymous report; returns the server status."""
        report_status = self._send_report_to_server(
            share_identifiable=False, uptime=UsageService.getUpTime())
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        return report_status

    def share_all_information(self, continue_working, name, email, text_box):
        """Send a full report including contact details; returns the server status."""
        report_status = self._send_report_to_server(
            share_identifiable=True,
            uptime=UsageService.getUpTime(),
            name=name,
            email=email,
            text_box=text_box)
        self.error_log.notice("Sent full information")

        self._handle_exit(continue_working)
        return report_status

    def error_handler(self, continue_working, share, name, email, text_box):
        """Dispatch on the sharing level selected in the dialog.
        :param share: 0 = share all, 1 = share non-identifiable, 2 = share nothing
        :return: server status, -1 for no share, -2 for an unrecognised signal
        """
        if share == 0:
            return self.share_all_information(continue_working, name, email, text_box)
        if share == 1:
            return self.share_non_identifiable_information(continue_working)
        if share == 2:
            return self.do_not_share(continue_working)
        self.error_log.error("Unrecognised signal in errorreporter exiting")
        self._handle_exit(continue_working)
        return -2

    def _handle_exit(self, continue_working):
        """Quit the view unless the user chose to keep working."""
        if continue_working:
            self.error_log.error("Continue working.")
        else:
            self.error_log.error("Terminated by user.")
            self._view.quit()

    def _send_report_to_server(self,
                               share_identifiable=False,
                               name='',
                               email='',
                               uptime='',
                               text_box=''):
        """Build and send the error report; show a message box on failure.
        :return: the HTTP status returned by the server (201 = success)
        """
        reporter = ErrorReporter(self._application, uptime,
                                 self._exit_code, share_identifiable,
                                 str(name), str(email), str(text_box),
                                 "".join(self._traceback))
        status = reporter.sendErrorReport()

        if status != 201:
            self._view.display_message_box(
                'Error contacting server', self.SENDING_ERROR_MESSAGE,
                'http request returned with status {}'.format(status))
            self.error_log.error(
                "Failed to send error report http request returned status {}".format(status))

        return status

    def show_view(self):
        """Show the dialog non-modally."""
        self._view.show()

    def show_view_blocking(self):
        """Show the dialog modally."""
        self._view.exec_()
示例#21
0
class ErrorReporterPresenter(object):
    SENDING_ERROR_MESSAGE = 'There was an error when sending the report.\nPlease contact [email protected] directly'

    def __init__(self,
                 view,
                 exit_code: str,
                 application: str,
                 traceback: Optional[str] = None):
        """
        Wire the presenter to its view and locate any crash traceback.

        :param view: A reference to the view managed by this presenter
        :param exit_code: A string containing the exit_code of the failing application
        :param application: A string containing the failing application name
        :param traceback: An optional string containing a traceback dumped as JSON-encoded string
        """
        self.error_log = Logger("errorreports")
        self._view = view
        self._exit_code = exit_code
        self._application = application
        # May later be replaced by a list of lines read from disk below;
        # both forms are consumed via "".join(self._traceback).
        self._traceback = traceback if traceback else ''
        self._view.set_report_callback(self.error_handler)
        self._view.moreDetailsButton.clicked.connect(self.show_more_details)

        # No traceback handed in: look for one dumped to disk by a
        # previous crash of this application.
        if not traceback:
            traceback_file_path = os.path.join(
                ConfigService.getAppDataDirectory(),
                '{}_stacktrace.txt'.format(application))
            try:
                if os.path.isfile(traceback_file_path):
                    with open(traceback_file_path, 'r') as file:
                        self._traceback = file.readlines()
                    # Rename the file so the same stacktrace is not
                    # picked up and reported a second time.
                    new_workspace_name = os.path.join(
                        ConfigService.getAppDataDirectory(),
                        '{}_stacktrace_sent.txt'.format(application))
                    os.rename(traceback_file_path, new_workspace_name)
            except OSError:
                # Best effort only: failure to read or rename the
                # stacktrace file must not stop the reporter opening.
                pass

    def forget_contact_info(self):
        """Blank the name and email stored in the persistent QSettings."""
        settings = QSettings()
        settings.beginGroup(self._view.CONTACT_INFO)
        for key in (self._view.NAME, self._view.EMAIL):
            settings.setValue(key, "")
        settings.endGroup()

    def do_not_share(self, continue_working=True):
        self.error_log.notice("No information shared")
        self._handle_exit(continue_working)
        if not self._view.rememberContactInfoCheckbox.checkState():
            self.forget_contact_info()
        return -1

    def share_non_identifiable_information(self, continue_working, text_box):
        """Send an anonymous report (no name/email) and return its status."""
        status = self._send_report_to_server(share_identifiable=False,
                                             uptime=UsageService.getUpTime(),
                                             text_box=text_box)
        self.error_log.notice("Sent non-identifiable information")
        self._handle_exit(continue_working)
        if not self._view.rememberContactInfoCheckbox.checkState():
            self.forget_contact_info()
        return status

    def share_all_information(self, continue_working, new_name, new_email,
                              text_box):
        """Send a fully identified report, then persist or clear the
        contact details according to the remember-me checkbox."""
        status = self._send_report_to_server(share_identifiable=True,
                                             uptime=UsageService.getUpTime(),
                                             name=new_name,
                                             email=new_email,
                                             text_box=text_box)
        self.error_log.notice("Sent full information")
        self._handle_exit(continue_working)

        if self._view.rememberContactInfoCheckbox.checkState():
            # Persist the contact details for the next report.
            settings = QSettings()
            settings.beginGroup(self._view.CONTACT_INFO)
            settings.setValue(self._view.NAME, new_name)
            settings.setValue(self._view.EMAIL, new_email)
            settings.endGroup()
        else:
            self.forget_contact_info()
        return status

    def error_handler(self, continue_working, share, name, email, text_box):
        if share == 0:
            status = self.share_all_information(continue_working, name, email,
                                                text_box)
        elif share == 1:
            status = self.share_non_identifiable_information(
                continue_working, text_box)
        elif share == 2:
            status = self.do_not_share(continue_working)
        else:
            self.error_log.error(
                "Unrecognised signal in errorreporter exiting")
            self._handle_exit(continue_working)
            status = -2

        return status

    def _handle_exit(self, continue_working):
        if not continue_working:
            self.error_log.error("Terminated by user.")
            self._view.quit()
        else:
            self.error_log.error("Continue working.")

    def _send_report_to_server(self,
                               share_identifiable=False,
                               name='',
                               email='',
                               uptime='',
                               text_box=''):
        """Build an ErrorReporter and submit it; return the HTTP status.

        Anything other than a 201 response is surfaced via a message box
        and logged as an error.
        """
        reporter = ErrorReporter(self._application, uptime, self._exit_code,
                                 share_identifiable, str(name), str(email),
                                 str(text_box), "".join(self._traceback))
        status = reporter.sendErrorReport()

        if status != 201:
            detail = 'http request returned with status {}'.format(status)
            self._view.display_message_box('Error contacting server',
                                           self.SENDING_ERROR_MESSAGE, detail)
            self.error_log.error(
                "Failed to send error report http request returned status {}".
                format(status))

        return status

    def show_view(self):
        self._view.show()

    def show_view_blocking(self):
        self._view.exec_()

    def show_more_details(self):
        """Build a report from the current form contents and display it.

        The stacktrace is pulled out of the generated message and shown
        separately so it does not appear twice in the details view.
        """
        view = self._view
        reporter = ErrorReporter(
            self._application, UsageService.getUpTime(), self._exit_code, True,
            str(view.input_name_line_edit.text()),
            str(view.input_email_line_edit.text()),
            str(view.input_free_text.toPlainText()),
            "".join(self._traceback))

        report = json.loads(reporter.generateErrorMessage())
        # Remove the stacktrace entry so it doesn't appear twice.
        stacktrace_text = report.pop("stacktrace")
        user_information = ''.join(
            '{}: {}\n'.format(key, value) for key, value in report.items())
        view.display_more_details(user_information, stacktrace_text)