Example #1
 def read_mantidplot_project(self, file_name):
     """
     :param file_name: String or string castable object; the file_name of the project
     :return: returns True if able to load the project, otherwise False
     Will attempt to read the project file in from the file_name that is given as a mantidplot project file.
     """
     # Get the string inside the mantidworkspaces tags, allowing for whitespace at either end
     workspaces_pattern = r"<mantidworkspaces>\s*(.*?)\s*</mantidworkspaces>"
     try:
         with open(file_name) as f:
             full_text = f.read()
             ws_match = re.search(workspaces_pattern, full_text, re.DOTALL)
             if ws_match:
                 # Split by tab
                 ws_list = ws_match.group(1).split('\t')
                 if len(ws_list) > 1 and ws_list[0] == "WorkspaceNames":
                     # The first entry is just an identification tag
                     self.workspace_names = ws_list[1:]
                     logger.notice("Loading workspaces from Mantidplot project file " + file_name)
                     return True
                     return True
             logger.warning("Mantidplot project file unable to be read")
             return False
     except Exception:
         logger.warning("Mantidplot project file unable to be loaded/read")
         return False
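A self-contained sketch of what workspaces_pattern extracts; the inline XML string below is illustrative, not a real project file:

import re

workspaces_pattern = r"<mantidworkspaces>\s*(.*?)\s*</mantidworkspaces>"
sample = "<mantidworkspaces>\nWorkspaceNames\tws1\tws2\n</mantidworkspaces>"
match = re.search(workspaces_pattern, sample, re.DOTALL)
if match:
    # The first tab-separated entry is the identification tag, the rest are names
    print(match.group(1).split('\t'))  # ['WorkspaceNames', 'ws1', 'ws2']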
Example #2
def rebin_reduction(workspace_name, rebin_string, multi_frame_rebin_string, num_bins):
    """
    @param workspace_name Name of workspace to rebin
    @param rebin_string Rebin parameters
    @param multi_frame_rebin_string Rebin string for multiple frame rebinning
    @param num_bins Max number of bins in input frames
    """
    from mantid.simpleapi import (Rebin, RebinToWorkspace, SortXAxis)

    if rebin_string is not None:
        if multi_frame_rebin_string is not None and num_bins is not None:
            # Multi frame data
            if mtd[workspace_name].blocksize() == num_bins:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=rebin_string)
            else:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=multi_frame_rebin_string)
        else:
            # Regular data
            SortXAxis(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name)
            Rebin(InputWorkspace=workspace_name,
                  OutputWorkspace=workspace_name,
                  Params=rebin_string)
    else:
        try:
            # If user does not want to rebin then just ensure uniform binning across spectra
            RebinToWorkspace(WorkspaceToRebin=workspace_name,
                             WorkspaceToMatch=workspace_name,
                             OutputWorkspace=workspace_name)
        except RuntimeError:
            logger.warning('Rebinning failed, will try to continue anyway.')
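A minimal usage sketch, assuming a running Mantid session; the sample workspace and rebin parameters are illustrative:

from mantid import mtd
from mantid.simpleapi import CreateSampleWorkspace

CreateSampleWorkspace(OutputWorkspace='sample_ws')  # X from 0 to 20000 by default
rebin_reduction('sample_ws', rebin_string='0,100,20000',
                multi_frame_rebin_string=None, num_bins=None)
print(mtd['sample_ws'].blocksize())  # 200 bins after rebinning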
Example #3
def chop_workspace(workspace, monitor_index):
    """
    Chops the specified workspace if its maximum x-value exceeds its instrument
    parameter, 'Workflow.ChopDataIfGreaterThan'.

    :param workspace:     The workspace to chop
    :param monitor_index: The index of the monitor spectra in the workspace.
    :return:              A tuple of the list of output workspace names and a boolean
                          specifying whether the workspace was chopped.
    """
    from mantid.simpleapi import ChopData

    workspace_name = workspace.getName()

    # Chop data if required
    try:
        chop_threshold = workspace.getInstrument().getNumberParameter('Workflow.ChopDataIfGreaterThan')[0]
        x_max = workspace.readX(0)[-1]
        chopped_data = x_max > chop_threshold
    except IndexError:
        logger.warning("Chop threshold not found in instrument parameters")
        chopped_data = False
    logger.information('Workspace {0} needs data chop: {1}'.format(workspace_name, str(chopped_data)))

    if chopped_data:
        ChopData(InputWorkspace=workspace,
                 OutputWorkspace=workspace_name,
                 MonitorWorkspaceIndex=monitor_index,
                 IntegrationRangeLower=5000.0,
                 IntegrationRangeUpper=10000.0,
                 NChops=5)
        return mtd[workspace_name].getNames(), True
    else:
        return [workspace_name], False
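A hedged usage sketch; 'raw_ws' is a hypothetical loaded workspace whose instrument defines the Workflow.ChopDataIfGreaterThan parameter:

from mantid import mtd

names, was_chopped = chop_workspace(mtd['raw_ws'], monitor_index=0)
if was_chopped:
    print('Chopped into: ' + ', '.join(names))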
Example #4
    def write_out(self):
        """
        Write out the project file that contains workspace names, interfaces information, plot preferences etc.
        """
        # Get the JSON string versions
        to_save_dict = {
            "workspaces": self.workspace_names,
            "plots": self.plots_to_save,
            "interfaces": self.interfaces_to_save
        }

        # Open file and save the string to it alongside the workspace_names
        if not os.path.isdir(self.directory):
            os.makedirs(self.directory)
        file_path = os.path.join(
            self.directory,
            (os.path.basename(self.directory) + self.project_file_ext))
        try:
            with open(file_path, "w+") as f:
                dump(obj=to_save_dict, fp=f)
        except Exception as e:
            # Catch any exception and log it
            if isinstance(e, KeyboardInterrupt):
                raise
            logger.warning("JSON project file unable to be opened/written to")
Example #5
def plotDOS(workspaces, labels=None, style='l', xscale='linear', yscale='linear'):
    """Plot density of state workspaces.

    Plots the given DOS workspaces.

    :param workspaces: a single workspace or a list thereof
    :type workspaces: str, :class:`mantid.api.MatrixWorkspace` or a :class:`list` thereof
    :param labels: a list of labels for the plot legend
    :type labels: str, a :class:`list` of strings or None
    :param style: plot style: 'l' for lines, 'm' for markers, 'lm' for both
    :type style: str
    :param xscale: horizontal axis scaling: 'linear', 'log', 'symlog', 'logit'
    :type xscale: str
    :param yscale: vertical axis scaling: 'linear', 'log', 'symlog', 'logit'
    :type yscale: str
    :returns: a tuple of (:mod:`matplotlib.Figure`, :mod:`matplotlib.Axes`)
    """
    _validate._styleordie(style)
    workspaces = _normwslist(workspaces)
    for ws in workspaces:
        _validate._singlehistogramordie(ws)
        if not _validate._isDOS(ws):
            logger.warning("The workspace '{}' does not look like proper DOS data. Trying to plot nonetheless.".format(ws))
    if labels is None:
        labels = [_workspacelabel(ws) for ws in workspaces]
    figure, axes = _plotsinglehistogram(workspaces, labels, style, xscale, yscale)
    _dostitle(workspaces, axes)
    if len(workspaces) > 1:
        axes.legend()
    return figure, axes
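A usage sketch, assuming 'dos_ws' is an existing density-of-states workspace in the analysis data service:

fig, axes = plotDOS('dos_ws', labels=['sample'], style='lm', yscale='log')
fig.savefig('dos.png')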
Example #7
def _confirm_all_workspaces_loaded(workspaces_to_confirm):
    current_workspaces = ADS.getObjectNames()
    for ws in workspaces_to_confirm:
        if ws not in current_workspaces:
            logger.warning("Project Loader was unable to load back all of project workspaces")
            return False
    return True
Example #8
def _create_multi_domain_func(function, input_ws):
    multi = 'composite=MultiDomainFunction,NumDeriv=true;'
    comp = '(composite=CompositeFunction,NumDeriv=true,$domains=i;' + str(function) + ');'
    stretched_indices = _find_indices_of_stretched_exponentials(function)

    if not stretched_indices:
        logger.warning("Stretched Exponential not found in function, tie-creation skipped.")
        return function

    ties = []
    kwargs = {}
    num_spectra = mtd[input_ws].getNumberHistograms()
    for i in range(0, num_spectra):
        multi += comp
        kwargs['WorkspaceIndex_' + str(i)] = i

        if i > 0:
            kwargs['InputWorkspace_' + str(i)] = input_ws

            # tie beta for every spectrum
            for stretched_index in stretched_indices:
                ties.append('f{0}.f{1}.Stretching=f0.f{1}.Stretching'.format(i, stretched_index))

    ties = ','.join(ties)
    multi += 'ties=(' + ties + ')'

    return multi, kwargs
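The returned function string and keyword arguments are intended for Fit; a hedged sketch where the StretchExp model string is illustrative:

from mantid.simpleapi import Fit

model = 'name=StretchExp,Height=1,Lifetime=1,Stretching=0.5'
multi_func, fit_kwargs = _create_multi_domain_func(model, 'input_ws')
# fit_kwargs carries the InputWorkspace_i/WorkspaceIndex_i keywords built above
Fit(Function=multi_func, InputWorkspace='input_ws',
    Output='fit_result', CreateOutput=True, **fit_kwargs)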
Example #9
    def save_workspaces(self, workspaces_to_save=None):
        """
        Use the private method _get_workspaces_to_save to get a list of workspaces that are present in the ADS to save
        to the directory that was passed at object creation time, it will also add each of them to the output_list
        private instance variable on the WorkspaceSaver class.
        :param workspaces_to_save: List of Strings; The workspaces that are to be saved to the project.
        """

        # Handle getting here and nothing has been given passed
        if workspaces_to_save is None:
            return

        for workspace_name in workspaces_to_save:
            # Get the workspace from the ADS
            workspace = ADS.retrieve(workspace_name)
            place_to_save_workspace = os.path.join(self.directory, workspace_name)

            from mantid.simpleapi import SaveMD, SaveNexusProcessed

            try:
                if isinstance(workspace, MDHistoWorkspace) or isinstance(workspace, IMDEventWorkspace):
                    # Save normally using SaveMD
                    SaveMD(InputWorkspace=workspace_name, Filename=place_to_save_workspace + ".nxs")
                else:
                    # Save normally using SaveNexusProcessed
                    SaveNexusProcessed(InputWorkspace=workspace_name, Filename=place_to_save_workspace + ".nxs")
            except Exception:
                logger.warning("Couldn't save workspace in project: " + workspace)

            self.output_list.append(workspace_name)
Example #10
    def __init__(self, **kwargs):
        self._shape_type = common.dictionary_key_helper(dictionary=kwargs,
                                                        key="shape",
                                                        throws=False)
        if self._shape_type is None:
            self._shape_type = "cylinder"
            warning = "Failed to supply parameter \"shape\" to SampleDetails - defaulting to \"cylinder\""
            print(
                "WARNING: {}".format(warning))  # Show warning in script window
            logger.warning(warning)  # Show warning in Mantid logging area

        center = common.dictionary_key_helper(
            dictionary=kwargs,
            key="center",
            exception_msg=property_err_string.format("center"))
        SampleDetails._validate_center(center)
        self._center = [float(i) for i in center]  # List of X, Y, Z position

        if self._shape_type == "cylinder":
            self._shape = _Cylinder(kwargs)
        elif self._shape_type == "slab":
            self._shape = _Slab(kwargs)
        else:
            raise KeyError(
                "Shape type \"" + self._shape_type +
                "\" not supported: current supported shape types are "
                "\"cylinder\" and \"slab\"")

        self.material_object = None
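A hedged construction sketch; the cylinder geometry keywords follow the ISIS powder scripts' conventions and are assumptions here:

sample = SampleDetails(shape='cylinder', center=[0.0, 0.0, 0.0],
                       height=4.0, radius=0.25)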
Example #11
    def show(self):
        """
        Override the base class method. Initialise the peak editing tool.
        """
        allowed_spectra = self._get_allowed_spectra()
        if allowed_spectra:
            self._add_spectra(allowed_spectra)
        else:
            self.toolbar_manager.toggle_fit_button_checked()
            logger.warning("Cannot open fitting tool: No valid workspaces to "
                           "fit to.")
            return

        self.tool = FitInteractiveTool(
            self.canvas,
            self.toolbar_manager,
            current_peak_type=self.defaultPeakType())
        self.tool.fit_range_changed.connect(self.set_fit_range)
        self.tool.peak_added.connect(self.peak_added_slot)
        self.tool.peak_moved.connect(self.peak_moved_slot)
        self.tool.peak_fwhm_changed.connect(self.peak_fwhm_changed_slot)
        self.tool.peak_type_changed.connect(self.setDefaultPeakType)
        self.tool.add_background_requested.connect(self.add_function_slot)
        self.tool.add_other_requested.connect(self.add_function_slot)
        super(FitPropertyBrowser, self).show()

        self.set_fit_bounds(self.get_fit_bounds())
        self.set_fit_range(self.tool.fit_range.get_range())

        self.setPeakToolOn(True)
        self.canvas.draw()
Example #13
    def _get_spectra_index(self, input_ws):
        """
        Gets the index of the two monitors and first detector for the current instrument configuration.
        Assumes monitors are named monitor1 and monitor2
        """

        instrument = mtd[input_ws].getInstrument()

        try:
            analyser = instrument.getStringParameter('analyser')[0]
            detector_1_idx = instrument.getComponentByName(analyser)[0].getID() - 1
            logger.information('Got index of first detector for analyser %s: %d' % (analyser, detector_1_idx))
        except IndexError:
            detector_1_idx = 2
            logger.warning('Could not determine index of first detector, using default value.')

        try:
            monitor_1_idx = self._get_detector_spectrum_index(input_ws, instrument.getComponentByName('monitor1').getID())

            monitor_2 = instrument.getComponentByName('monitor2')
            if monitor_2 is not None:
                monitor_2_idx = self._get_detector_spectrum_index(input_ws, monitor_2.getID())
            else:
                monitor_2_idx = None

            logger.information('Got index of monitors: %d, %s' % (monitor_1_idx, str(monitor_2_idx)))
        except IndexError:
            monitor_1_idx = 0
            monitor_2_idx = 1
            logger.warning('Could not determine index of monitors, using default values.')

        return monitor_1_idx, monitor_2_idx, detector_1_idx
Example #14
    def write_out(self):
        """
        Write out the project file that contains workspace names, interfaces information, plot preferences etc.
        """
        # Get the JSON string versions
        to_save_dict = {
            "workspaces": self.workspace_names,
            "plots": self.plots_to_save,
            "interfaces": self.interfaces_to_save
        }

        # Open file and save the string to it alongside the workspace_names
        if self.project_file_ext not in os.path.basename(self.file_name):
            self.file_name = self.file_name + self.project_file_ext
        try:
            with open(self.file_name, "w+") as f:
                dump(obj=to_save_dict, fp=f)
        except KeyboardInterrupt:
            # Let the user abort the save
            raise
        except (IOError, OSError) as e:
            logger.warning("JSON project file unable to be opened/written to.")
            logger.debug("Full error: {}".format(e))
        except Exception as e:
            logger.warning("Unknown error occurred. Full detail: {}".format(e))
Example #15
    def save_project(self, directory, workspace_to_save=None):
        """
        The method that will actually save the project and call relevant savers for workspaces, plots, interfaces etc.
        :param directory: String; The directory of the project to save into
        :param workspace_to_save: List; of Strings that will have workspace names in it, if None will save all
        :return: None; If the method cannot be completed.
        """
        # Check that a directory was given
        if directory is None:
            logger.warning("Can not save to empty directory")
            return

        # Check this isn't saving a blank project file
        if workspace_to_save is None:
            logger.warning("Can not save an empty project")
            return

        # Save workspaces to that location
        workspace_saver = workspacesaver.WorkspaceSaver(directory=directory)
        workspace_saver.save_workspaces(workspaces_to_save=workspace_to_save)

        # Pass dicts to Project Writer
        writer = ProjectWriter(directory, workspace_saver.get_output_list(),
                               self.project_file_ext)
        writer.write_out()
Example #16
    def show(self):
        """
        Override the base class method. Initialise the peak editing tool.
        """
        allowed_spectra = self._get_allowed_spectra()
        if allowed_spectra:
            self._add_spectra(allowed_spectra)
        else:
            self.toolbar_manager.toggle_fit_button_checked()
            logger.warning("Cannot open fitting tool: No valid workspaces to "
                           "fit to.")
            return

        self.tool = FitInteractiveTool(self.canvas, self.toolbar_manager,
                                       current_peak_type=self.defaultPeakType())
        self.tool.fit_start_x_moved.connect(self.setStartX)
        self.tool.fit_end_x_moved.connect(self.setEndX)
        self.tool.peak_added.connect(self.peak_added_slot)
        self.tool.peak_moved.connect(self.peak_moved_slot)
        self.tool.peak_fwhm_changed.connect(self.peak_fwhm_changed_slot)
        self.tool.peak_type_changed.connect(self.setDefaultPeakType)
        self.tool.add_background_requested.connect(self.add_function_slot)
        self.tool.add_other_requested.connect(self.add_function_slot)
        self.setXRange(self.tool.fit_start_x.x, self.tool.fit_end_x.x)
        super(FitPropertyBrowser, self).show()
        self.setPeakToolOn(True)
        self.canvas.draw()
Example #18
def scipy_not_available():
    ''' Check whether scipy is available on this platform'''
    try:
        import scipy
        return False
    except ImportError:
        logger.warning("Skipping DensityOfStatesTest because scipy is unavailable.")
        return True
Example #19
def scipy_not_available():
    ''' Check whether scipy is available on this platform'''
    try:
        import scipy
        return False
    except ImportError:
        logger.warning("Skipping SimulatedDensityOfStatesTest because scipy is unavailable.")
        return True
Example #20
def _confirm_all_workspaces_loaded(workspaces_to_confirm):
    current_workspaces = ADS.getObjectNames()
    for ws in workspaces_to_confirm:
        if ws not in current_workspaces:
            logger.warning(
                "Project Loader was unable to load back all of the project workspaces"
            )
            return False
    return True
Example #21
    def _get_spectra_index(self, input_ws):
        """
        Gets the index of the two monitors and first detector for the current instrument configuration.
        Assumes monitors are named monitor1 and monitor2
        """

        workspace = mtd[input_ws]
        instrument = workspace.getInstrument()

        # Get workspace index of first detector
        detector_1_idx = 2

        try:
            # First try to get first detector for current analyser bank
            analyser = instrument.getStringParameter('analyser')[0]
            detector_1_idx = instrument.getComponentByName(
                analyser)[0].getID() - 1
            logger.information(
                'Got index of first detector for analyser %s: %d' %
                (analyser, detector_1_idx))

        except IndexError:
            # If that fails just get the first spectrum which is a detector
            spectrumInfo = workspace.spectrumInfo()
            for spec_idx in range(workspace.getNumberHistograms()):
                if not spectrumInfo.isMonitor(spec_idx):
                    detector_1_idx = spec_idx
                    logger.information(
                        'Got index of first detector in workspace: %d' %
                        (detector_1_idx))
                    break

        # Get workspace index of monitor(s)
        monitor_1_idx = 0
        monitor_2_idx = None

        if instrument.hasParameter('Workflow.Monitor1-SpectrumNumber'):
            # First try to get monitors based on workflow parameters in IPF
            monitor_1_idx = int(
                instrument.getNumberParameter(
                    'Workflow.Monitor1-SpectrumNumber')[0])

            if instrument.hasParameter('Workflow.Monitor2-SpectrumNumber'):
                monitor_2_idx = int(
                    instrument.getNumberParameter(
                        'Workflow.Monitor2-SpectrumNumber')[0])

            logger.information('Got index of monitors: %d, %s' %
                               (monitor_1_idx, str(monitor_2_idx)))

        else:
            # If that fails just use some default values (correct ~60% of the time)
            monitor_2_idx = 1
            logger.warning(
                'Could not determine index of monitors, using default values.')

        return monitor_1_idx, monitor_2_idx, detector_1_idx
Example #22
 def read_project(self):
     try:
         self.parser = MantidPlotProjectParser(self.filename)
         self.read_workspaces()
         self.read_interfaces()
         self.read_plots()
     except Exception as err:
         logger.warning("Mantidplot project file unable to be loaded/read",
                        err)
Example #23
 def read_project(self):
     try:
         with open(self.filename) as f:
             self.json_data = json.load(f)
             self.read_workspaces()
             self.read_interfaces()
             self.read_plots()
     except Exception as err:
         logger.warning("JSON project file unable to be loaded/read", err)
Example #24
    def save_project(self,
                     file_name,
                     workspace_to_save=None,
                     plots_to_save=None,
                     interfaces_to_save=None,
                     project_recovery=True):
        """
        The method that will actually save the project and call relevant savers for workspaces, plots, interfaces etc.
        :param file_name: String; The file_name of the project file to save to
        :param workspace_to_save: List; of Strings that will have workspace names in it, if None will save all
        :param plots_to_save: List; of matplotlib.figure objects to save to the project file.
        :param interfaces_to_save: List of Lists of Window and Encoder; the interfaces to save and the encoders to use
        :param project_recovery: Bool; If the behaviour of Project Save should be altered to function correctly inside
        of project recovery
        :return: None; If the method cannot be completed.
        """
        # Check that a file_name was given
        if file_name is None:
            logger.warning("Please select a valid file name")
            return

        # Check this isn't saving a blank project file
        if (workspace_to_save is None and plots_to_save is None
                and interfaces_to_save is None) and project_recovery:
            logger.warning("Can not save an empty project")
            return

        directory = os.path.dirname(file_name)
        # Save workspaces to that location
        if project_recovery:
            workspace_saver = WorkspaceSaver(directory=directory)
            workspace_saver.save_workspaces(
                workspaces_to_save=workspace_to_save)
            saved_workspaces = workspace_saver.get_output_list()
        else:
            # Assume that this is project recovery so pass a list of workspace names
            saved_workspaces = ADS.getObjectNames()

        # Generate plots
        plots_to_save_list = PlotsSaver().save_plots(plots_to_save,
                                                     not project_recovery)

        # Save interfaces
        if interfaces_to_save is None:
            interfaces_to_save = []

        interfaces = self._return_interfaces_dicts(
            directory=directory, interfaces_to_save=interfaces_to_save)

        # Pass dicts to Project Writer
        writer = ProjectWriter(workspace_names=saved_workspaces,
                               plots_to_save=plots_to_save_list,
                               interfaces_to_save=interfaces,
                               save_location=file_name,
                               project_file_ext=self.project_file_ext)
        writer.write_out()
Example #25
def CheckAnalysersOrEFixed(workspace1, workspace2):
    """
    Check that the workspaces have EFixed if the technique is direct, otherwise check that the analysers and
    reflections are identical
    """
    if is_technique_direct(workspace1) or is_technique_direct(workspace2):
        logger.warning('Could not find an analyser for the input workspaces because the energy mode is Direct')
        check_e_fixed_are_equal(workspace1, workspace2)
    else:
        check_analysers_are_equal(workspace1, workspace2)
Example #26
 def load_plots(self, plots_list):
     if plots_list is None:
         return
     for plot_ in plots_list:
         try:
             self.make_fig(plot_)
         except BaseException as e:
             # Catch all errors in here so it can fail silently-ish
             if isinstance(e, KeyboardInterrupt):
                 raise KeyboardInterrupt(str(e))
             logger.warning("A plot was unable to be loaded from the save file. Error: " + str(e))
Example #27
def old_modules():
    """" Check if there are proper versions of  Python and numpy."""
    is_python_old = AbinsTestHelpers.old_python()
    if is_python_old:
        logger.warning("Skipping AbinsBasicTest because Python is too old.")

    is_numpy_old = AbinsTestHelpers.is_numpy_valid(np.__version__)
    if is_numpy_old:
        logger.warning("Skipping AbinsBasicTest because numpy is too old.")

    return is_python_old or is_numpy_old
Example #28
 def read_project(self, directory):
     """
     Will read the project file in from the directory that is given.
     :param directory: String or string castable object; the directory of the project
     """
     try:
         with open(os.path.join(directory, (os.path.basename(directory) + self.project_file_ext))) as f:
             json_data = json.load(f)
             self.workspace_names = json_data["workspaces"]
     except Exception:
         logger.warning("JSON project file unable to be loaded/read")
Example #29
 def load_workspaces(directory, workspaces_to_load):
     """
     The method that is called to load in workspaces. From the given directory and the workspace names provided.
     :param directory: String or string castable object; The project directory
     :param workspaces_to_load: List of Strings; of the workspaces to load
     """
     from mantid.simpleapi import Load  # noqa
     for workspace in workspaces_to_load:
         try:
             Load(path.join(directory, (workspace + ".nxs")), OutputWorkspace=workspace)
         except Exception:
             logger.warning("Couldn't load file in project: " + workspace + ".nxs")
Example #30
    def ties(self, **kwargs):
        """Set ties on the parameters.

        @param kwargs: Ties as name=value pairs: name is a parameter name,
            the value is a tie string or a number. For example:
                ties(A0=0.1, A1='2*A0')
        """
        for param in kwargs:
            if self.function.hasParameter(self.prefix + param):
                self.function.tie(self.prefix + param, str(kwargs[param]))
            else:
                logger.warning(f"Cannot tie parameter '{param}' as it does not exist in the Function.")
Example #31
    def load_plots(self, plots_list):
        if plots_list is None:
            return

        for plot_ in plots_list:
            try:
                self.make_fig(plot_)
            except BaseException as e:
                # Catch all errors in here so it can fail silently-ish
                if isinstance(e, KeyboardInterrupt):
                    raise KeyboardInterrupt(str(e))
                logger.warning("A plot was unable to be loaded from the save file. Error: " + str(e))
Example #32
 def load_interfaces(self, directory):
     for interface in self.project_reader.interface_list:
         decoder = self.decoder_factory.find_decoder(interface["tag"])
         try:
             decoded_interface = decoder.decode(interface, directory)
             decoded_interface.setAttribute(Qt.WA_DeleteOnClose, True)
             decoded_interface.show()
         except Exception as e:
             # Catch any exception and log it for the encoder
             if isinstance(e, KeyboardInterrupt):
                 raise
             logger.warning("Project Loader: An interface could not be loaded error: " + str(e))
Example #33
 def read_project(self, directory):
     """
     Will read the project file in from the directory that is given.
     :param directory: String or string castable object; the directory of the project
     """
     try:
         with open(
                 os.path.join(directory, (os.path.basename(directory) +
                                          self.project_file_ext))) as f:
             json_data = json.load(f)
             self.workspace_names = json_data["workspaces"]
     except Exception:
         logger.warning("JSON project file unable to be loaded/read")
Example #34
    def load_interfaces(self, directory):
        for interface in self.project_reader.interface_list:
            # Find decoder
            decoder = self.decoder_factory.find_decoder(interface["tag"])

            # Decode and Show the interface
            try:
                decoded_interface = decoder.decode(interface, directory)
                decoded_interface.show()
            except Exception as e:
                # Catch any exception and log it for the encoder
                if isinstance(e, KeyboardInterrupt):
                    raise
                logger.warning("Project Loader: An interface could not be loaded error: " + str(e))
Example #35
 def _update_workspace_info(self):
     " Update the allowed spectra/tableworkspace in the fit browser"
     allowed_spectra_old = self.allowed_spectra
     allowed_spectra = self._get_allowed_spectra()
     table = self._get_table_workspace()
     if allowed_spectra:
         self._update_spectra(allowed_spectra, allowed_spectra_old)
     elif table:
         self.addAllowedTableWorkspace(table)
     else:
         self.toolbar_manager.toggle_fit_button_checked()
         logger.warning("Cannot use fitting tool: No valid workspaces to fit to.")
         return False
     return True
Example #36
 def load_workspaces(directory, workspaces_to_load):
     """
     The method that is called to load in workspaces. From the given directory and the workspace names provided.
     :param directory: String or string castable object; The project directory
     :param workspaces_to_load: List of Strings; of the workspaces to load
     """
     from mantid.simpleapi import Load  # noqa
     for workspace in workspaces_to_load:
         try:
             Load(path.join(directory, (workspace + ".nxs")),
                  OutputWorkspace=workspace)
         except Exception:
             logger.warning("Couldn't load file in project: " + workspace +
                            ".nxs")
Example #37
 def _return_interfaces_dicts(directory, interfaces_to_save):
     interfaces = []
     for interface, encoder in interfaces_to_save:
         try:
             # Store the encoded data, keyed by the first tag listed in the encoder's attributes
             tag = encoder.tags()[0]
             encoded_dict = encoder.encode(interface, directory)
             encoded_dict["tag"] = tag
             interfaces.append(encoded_dict)
         except Exception as e:
             # Catch any exception and log it
             if isinstance(e, KeyboardInterrupt):
                 raise
             logger.warning("Project Saver: An interface could not be saved error: " + str(e))
     return interfaces
Example #38
def rebin_reduction(workspace_name, rebin_string, multi_frame_rebin_string,
                    num_bins):
    """
    @param workspace_name Name of workspace to rebin
    @param rebin_string Rebin parameters
    @param multi_frame_rebin_string Rebin string for multiple frame rebinning
    @param num_bins Max number of bins in input frames
    """
    from mantid.simpleapi import (Rebin, SortXAxis, RemoveSpectra)

    if rebin_string is not None:
        if multi_frame_rebin_string is not None and num_bins is not None:
            # Multi frame data
            if mtd[workspace_name].blocksize() == num_bins:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=rebin_string)
            else:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=multi_frame_rebin_string)
        else:
            # Regular data
            RemoveSpectra(InputWorkspace=workspace_name,
                          OutputWorkspace=workspace_name,
                          RemoveSpectraWithNoDetector=True)
            SortXAxis(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name)
            Rebin(InputWorkspace=workspace_name,
                  OutputWorkspace=workspace_name,
                  Params=rebin_string)
    else:
        try:
            # If user does not want to rebin then just ensure uniform binning across spectra
            # Extract the binning parameters from the first spectrum.
            # There is probably a better way to calculate the binning parameters,
            # but this gets the right answer.
            xaxis = mtd[workspace_name].readX(0)
            params = []
            for i, x in enumerate(xaxis):
                params.append(x)
                if i < len(xaxis) - 1:
                    params.append(xaxis[i + 1] - x)  # delta
            Rebin(InputWorkspace=workspace_name,
                  OutputWorkspace=workspace_name,
                  Params=params)
        except RuntimeError:
            logger.warning('Rebinning failed, will try to continue anyway.')
Example #39
    def save_project(self, file_name, workspace_to_save=None, plots_to_save=None, interfaces_to_save=None,
                     project_recovery=True):
        """
        The method that will actually save the project and call relevant savers for workspaces, plots, interfaces etc.
        :param file_name: String; The file_name of the project file to save to
        :param workspace_to_save: List; of Strings that will have workspace names in it, if None will save all
        :param plots_to_save: List; of matplotlib.figure objects to save to the project file.
        :param interfaces_to_save: List of Lists of Window and Encoder; the interfaces to save and the encoders to use
        :param project_recovery: Bool; If the behaviour of Project Save should be altered to function correctly inside
        of project recovery
        :return: None; If the method cannot be completed.
        """
        # Check that a file_name was given
        if file_name is None:
            logger.warning("Please select a valid file name")
            return

        # Check this isn't saving a blank project file
        if (workspace_to_save is None and plots_to_save is None and interfaces_to_save is None) and project_recovery:
            logger.warning("Can not save an empty project")
            return

        directory = os.path.dirname(file_name)
        # Save workspaces to that location
        if project_recovery:
            workspace_saver = WorkspaceSaver(directory=directory)
            workspace_saver.save_workspaces(workspaces_to_save=workspace_to_save)
            saved_workspaces = workspace_saver.get_output_list()
        else:
            # Assume that this is project recovery so pass a list of workspace names
            saved_workspaces = ADS.getObjectNames()

        # Generate plots
        plots_to_save_list = PlotsSaver().save_plots(plots_to_save)

        # Save interfaces
        if interfaces_to_save is None:
            interfaces_to_save = []

        interfaces = self._return_interfaces_dicts(directory=directory, interfaces_to_save=interfaces_to_save)

        # Pass dicts to Project Writer
        writer = ProjectWriter(workspace_names=saved_workspaces,
                               plots_to_save=plots_to_save_list,
                               interfaces_to_save=interfaces,
                               save_location=file_name,
                               project_file_ext=self.project_file_ext)
        writer.write_out()
Example #40
    def save_plots(self, plot_dict):
        # If the argument is None, return an empty list
        if plot_dict is None:
            return []

        plot_list = []
        for index in plot_dict:
            try:
                plot_list.append(self.get_dict_from_fig(plot_dict[index].canvas.figure))
            except BaseException as e:
                # Catch all errors in here so it can fail silently-ish, if this is happening on all plots make sure you
                # have built your project.
                if isinstance(e, KeyboardInterrupt):
                    raise KeyboardInterrupt
                logger.warning("A plot was unable to be saved")
        return plot_list
Example #41
    def _return_interfaces_dicts(directory, interfaces_to_save):
        interfaces = []
        for interface, encoder in interfaces_to_save:
            # Store the encoded data, keyed by the first tag listed in the encoder's attributes
            try:
                tag = encoder.tags[0]
                encoded_dict = encoder.encode(interface, directory)
                encoded_dict["tag"] = tag
                interfaces.append(encoded_dict)
            except Exception as e:
                # Catch any exception and log it
                if isinstance(e, KeyboardInterrupt):
                    raise
                logger.warning("Project Saver: An interface could not be saver error: " + str(e))

        return interfaces
Example #42
    def load_interfaces(self, directory):
        for interface in self.project_reader.interface_list:
            # Find decoder
            decoder = self.decoder_factory.find_decoder(interface["tag"])

            # Decode and Show the interface
            try:
                decoded_interface = decoder.decode(interface, directory)
                decoded_interface.show()
            except Exception as e:
                # Catch any exception and log it for the encoder
                if isinstance(e, KeyboardInterrupt):
                    raise
                logger.warning(
                    "Project Loader: An interface could not be loaded error: "
                    + str(e))
Example #43
 def read_project(self, file_name):
     """
     :param file_name: String or string castable object; the file_name of the project
     Will read the project file in from the file_name that is given.
     try:
     """
     try:
         with open(file_name) as f:
             json_data = json.load(f)
             self.workspace_names = json_data["workspaces"]
             self.plot_list = json_data["plots"]
             self.interface_list = json_data["interfaces"]
     except Exception as e:
         if isinstance(e, KeyboardInterrupt):
             raise
         logger.warning("JSON project file unable to be loaded/read")
Example #44
    def fix(self, *args):
        """
        Set fixes for the parameters in the function.

        @param args: A list of parameters to fix. Specifying 'all' will fix all of the parameters in a function.
        """
        params = self._validate_parameter_args(*args)

        if "all" in [param.lower() for param in params]:
            self.function.fixAll()
        else:
            for param in params:
                if self.function.hasParameter(self.prefix + param):
                    self.function.fixParameter(self.prefix + param)
                else:
                    logger.warning(f"Cannot fix parameter '{param}' as it does not exist in the Function.")
Example #46
 def check_energy_range_for_zeroes(self, first_data_point, last_data_point):
     if first_data_point > self._e_min:
         logger.warning("Sample workspace contains leading zeros within the energy range.")
         logger.warning("Updating eMin: eMin = " + str(first_data_point))
         self._e_min = first_data_point
     if last_data_point < self._e_max:
         logger.warning("Sample workspace contains trailing zeros within the energy range.")
         logger.warning("Updating eMax: eMax = " + str(last_data_point))
         self._e_max = last_data_point
Example #47
    def _get_spectra_index(self, input_ws):
        """
        Gets the index of the two monitors and first detector for the current instrument configuration.
        Assumes monitors are named monitor1 and monitor2
        """

        workspace = mtd[input_ws]
        instrument = workspace.getInstrument()

        # Get workspace index of first detector
        detector_1_idx = 2

        try:
            # First try to get first detector for current analyser bank
            analyser = instrument.getStringParameter('analyser')[0]
            detector_1_idx = instrument.getComponentByName(analyser)[0].getID() - 1
            logger.information('Got index of first detector for analyser %s: %d' % (analyser, detector_1_idx))

        except IndexError:
            # If that fails just get the first spectrum which is a detector
            spectrumInfo = workspace.spectrumInfo()
            for spec_idx in range(workspace.getNumberHistograms()):
                if not spectrumInfo.isMonitor(spec_idx):
                    detector_1_idx = spec_idx
                    logger.information('Got index of first detector in workspace: %d' % (detector_1_idx))
                    break

        # Get workspace index of monitor(s)
        monitor_1_idx = 0
        monitor_2_idx = None

        if instrument.hasParameter('Workflow.Monitor1-SpectrumNumber'):
            # First try to get monitors based on workflow parameters in IPF
            monitor_1_idx = int(instrument.getNumberParameter('Workflow.Monitor1-SpectrumNumber')[0])

            if instrument.hasParameter('Workflow.Monitor2-SpectrumNumber'):
                monitor_2_idx = int(instrument.getNumberParameter('Workflow.Monitor2-SpectrumNumber')[0])

            logger.information('Got index of monitors: %d, %s' % (monitor_1_idx, str(monitor_2_idx)))

        else:
            # If that fails just use some default values (correct ~60% of the time)
            monitor_2_idx = 1
            logger.warning('Could not determine index of monitors, using default values.')

        return monitor_1_idx, monitor_2_idx, detector_1_idx
Example #48
def rebin_reduction(workspace_name, rebin_string, multi_frame_rebin_string, num_bins):
    """
    @param workspace_name Name of workspace to rebin
    @param rebin_string Rebin parameters
    @param multi_frame_rebin_string Rebin string for multiple frame rebinning
    @param num_bins Max number of bins in input frames
    """
    from mantid.simpleapi import (Rebin, SortXAxis)

    if rebin_string is not None:
        if multi_frame_rebin_string is not None and num_bins is not None:
            # Multi frame data
            if mtd[workspace_name].blocksize() == num_bins:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=rebin_string)
            else:
                Rebin(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      Params=multi_frame_rebin_string)
        else:
            # Regular data
            SortXAxis(InputWorkspace=workspace_name,
                      OutputWorkspace=workspace_name,
                      IgnoreHistogramValidation=True)
            Rebin(InputWorkspace=workspace_name,
                  OutputWorkspace=workspace_name,
                  Params=rebin_string)
    else:
        try:
            # If user does not want to rebin then just ensure uniform binning across spectra
            # Extract the binning parameters from the first spectrum.
            # There is probably a better way to calculate the binning parameters,
            # but this gets the right answer.
            xaxis = mtd[workspace_name].readX(0)
            params = []
            for i, x in enumerate(xaxis):
                params.append(x)
                if i < len(xaxis) - 1:
                    params.append(xaxis[i + 1] - x)  # delta
            Rebin(InputWorkspace=workspace_name,
                  OutputWorkspace=workspace_name,
                  Params=params)
        except RuntimeError:
            logger.warning('Rebinning failed, will try to continue anyway.')
Example #49
    def write_out(self):
        """
        Write out the project file that contains workspace names, interfaces information, plot preferences etc.
        """
        # Get the JSON string versions
        to_save_dict = {"workspaces": self.workspace_names, "plots": self.plots_to_save,
                        "interfaces": self.interfaces_to_save}

        # Open file and save the string to it alongside the workspace_names
        if self.project_file_ext not in os.path.basename(self.file_name):
            self.file_name = self.file_name + self.project_file_ext
        try:
            with open(self.file_name, "w+") as f:
                dump(obj=to_save_dict, fp=f)
        except Exception as e:
            # Catch any exception and log it
            if isinstance(e, KeyboardInterrupt):
                raise
            logger.warning("JSON project file unable to be opened/written to")
Example #50
    def runTest(self):
        # Disable for Python 2.6 (which we still have on rhel6)
        import sys
        vers = sys.version_info
        if vers < (2, 7, 0):
            from mantid import logger
            logger.warning("Not running this test as it requires Python >= 2.7. Version found: {0}".
                           format(vers))
            self._success = True
            return

        self._success = False
        # Custom code to create and run this single test suite
        suite = unittest.TestSuite()
        suite.addTest(unittest.makeSuite(ImagingIMATTomoTests, "test"))
        runner = unittest.TextTestRunner()
        # Run using either runner
        res = runner.run(suite)
        self._success = res.wasSuccessful()
Example #51
    def __init__(self, **kwargs):
        self._shape_type = common.dictionary_key_helper(dictionary=kwargs, key="shape", throws=False)
        if self._shape_type is None:
            self._shape_type = "cylinder"
            warning = "Failed to supply parameter \"shape\" to SampleDetails - defaulting to \"cylinder\""
            print("WARNING: {}".format(warning))  # Show warning in script window
            logger.warning(warning)               # Show warning in Mantid logging area

        center = common.dictionary_key_helper(dictionary=kwargs, key="center",
                                              exception_msg=property_err_string.format("center"))
        SampleDetails._validate_center(center)
        self._center = [float(i) for i in center]  # List of X, Y, Z position

        if self._shape_type == "cylinder":
            self._shape = _Cylinder(kwargs)
        elif self._shape_type == "slab":
            self._shape = _Slab(kwargs)
        else:
            raise KeyError("Shape type \"" + self._shape_type + "\" not supported: current supported shape types are "
                           "\"cylinder\" and \"slab\"")

        self.material_object = None
Example #52
        sys.exit()
    else:
        filename = sys.argv[1]
        outdir = sys.argv[2]
    nexus_file = sys.argv[1]
    output_directory = sys.argv[2]
    output_file = os.path.split(nexus_file)[-1].replace('.nxs.h5', '')

    # Load file
    raw = Load(nexus_file)

    # Do the cross-correlation and save the file
    try:
        cc = CorelliCrossCorrelate(raw, 56000)
    except RuntimeError as e:
        logger.warning("Cross Correlation failed because: " + str(e))
        cc_succeeded = False
    else:
        SaveNexus(cc, Filename=output_directory + output_file + "_elastic.nxs")
        cc_succeeded = True

    # Validate inputs
    config = processInputs()
    config.validate()

    # Masking - use vanadium, then add extra masks
    if config.can_do_norm:
        MaskDetectors(Workspace=raw, MaskedWorkspace='autoreduction_sa')
    if config.good_mask:
        for d in config.mask:
            if d.values() != ['', '', '']:
Example #53
def plotSofQW(workspace, QMin=0., QMax=None, EMin=None, EMax=None, VMin=0., VMax=None, colormap='jet', colorscale='linear'):
    """Plot a 2D :math:`S(Q,E)` workspace.

    :param workspace: a workspace to plot
    :type workspace: str or :class:`mantid.api.MatrixWorkspace`
    :param QMin: minimum :math:`Q` to include in the plot
    :type QMin: float or None
    :param QMax: maximum :math:`Q` to include in the plot
    :type QMax: float or None
    :param EMin: minimum energy transfer to include in the plot
    :type EMin: float or None
    :param EMax: maximum energy transfer to include in the plot
    :type EMax: float or None
    :param VMin: minimum intensity to show on the color bar
    :type VMin: float or None
    :param VMax: maximum intensity to show on the color bar
    :type VMax: float or None
    :param colormap: name of the colormap
    :type colormap: str
    :param colorscale: color map scaling: 'linear', 'log'
    :type colorscale: str
    :returns: a tuple of (:mod:`matplotlib.Figure`, :mod:`matplotlib.Axes`)
    """
    workspace = _normws(workspace)
    if not _validate._isSofQW(workspace):
        logger.warning("The workspace '{}' does not look like proper S(Q,W) data. Trying to plot nonetheless.".format(str(workspace)))
    qHorizontal = workspace.getAxis(0).getUnit().name() == 'q'
    isSusceptibility = workspace.YUnitLabel() == 'Dynamic susceptibility'
    figure, axes = subplots()
    if QMin is None:
        QMin = 0.
    if QMax is None:
        dummy, QMax = validQ(workspace)
    if EMin is None:
        if isSusceptibility:
            EMin = 0.
        else:
            EMin, unusedEMax = _energylimits(workspace)
    if EMax is None:
        EAxisIndex = 1 if qHorizontal else 0
        EAxis = workspace.getAxis(EAxisIndex).extractValues()
        EMax = EAxis[-1]
    if VMax is None:
        vertMax = EMax if EMax is not None else numpy.inf
        dummy, VMax = nanminmax(workspace, horMin=QMin, horMax=QMax, vertMin=EMin, vertMax=vertMax)
        VMax /= 100.
    if VMin is None:
        VMin = 0.
    colorNormalization = None
    if colorscale == 'linear':
        colorNormalization = matplotlib.colors.Normalize()
    elif colorscale == 'log':
        if VMin <= 0.:
            if VMax > 0.:
                VMin = VMax / 1000.
            else:
                raise RuntimeError('Cannot plot nonpositive range in log scale.')
        colorNormalization = matplotlib.colors.LogNorm()
    else:
        raise RuntimeError('Unknown colorscale: ' + colorscale)
    contours = axes.pcolor(workspace, vmin=VMin, vmax=VMax, distribution=True, cmap=colormap, norm=colorNormalization)
    colorbar = figure.colorbar(contours)
    if isSusceptibility:
        colorbar.set_label(r"$\chi''(Q,E)$ (arb. units)")
    else:
        colorbar.set_label(r'$S(Q,E)$ (arb. units)')
    if qHorizontal:
        xLimits = {'left': QMin, 'right': QMax}
        yLimits = {'bottom': EMin}
        if EMax is not None:
            yLimits['top'] = EMax
        xLabel = r'$Q$ ($\mathrm{\AA}^{-1}$)'
        yLabel = 'Energy (meV)'
    else:
        xLimits = {'left': EMin}
        if EMax is not None:
            xLimits['right'] = EMax
        yLimits = {'bottom': QMin, 'top': QMax}
        xLabel = 'Energy (meV)'
        yLabel = r'$Q$ ($\mathrm{\AA}^{-1}$)'
    axes.set_xlim(**xLimits)
    axes.set_ylim(**yLimits)
    axes.set_xlabel(xLabel)
    axes.set_ylabel(yLabel)
    _SofQWtitle(workspace, axes)
    return figure, axes
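For reference, a minimal usage sketch; the workspace name 'sqw' and the chosen limits are hypothetical, and any reduced S(Q,E) workspace would do:

# Hypothetical example: plot a reduced S(Q,E) workspace on a log color scale
fig, ax = plotSofQW('sqw', QMax=2.5, EMin=-2., EMax=20., colorscale='log')
fig.savefig('sqw.png')  # the returned objects are ordinary matplotlib Figure/Axes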
Example #54
0
    def PyExec(self):
        # This import is required for the compatibility check below
        from IndirectImport import run_f2py_compatibility_test

        run_f2py_compatibility_test()

        from IndirectBayes import (CalcErange, GetXYE, ReadNormFile,
                                   ReadWidthFile, QLAddSampleLogs, C2Fw,
                                   C2Se, QuasiPlot)
        from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed, GetThetaQ,
                                    CheckHistZero, CheckHistSame, IndentifyDataBoundaries)
        setup_prog = Progress(self, start=0.0, end=0.3, nreports = 5)
        self.log().information('BayesQuasi input')

        erange = [self._e_min, self._e_max]
        nbins = [self._sam_bins, self._res_bins]
        setup_prog.report('Converting to binary for Fortran')
        #convert true/false to 1/0 for fortran
        o_el = 1 if self._elastic else 0
        o_w1 = 1 if self._width else 0
        o_res = 1 if self._res_norm else 0

        #fortran code uses background choices defined using the following numbers
        setup_prog.report('Encoding input options')
        if self._background == 'Sloping':
            o_bgd = 2
        elif self._background == 'Flat':
            o_bgd = 1
        elif self._background == 'Zero':
            o_bgd = 0
        else:
            raise ValueError('Unknown background option: ' + self._background)

        fitOp = [o_el, o_bgd, o_w1, o_res]

        setup_prog.report('Establishing save path')
        workdir = config['defaultsave.directory']
        if not os.path.isdir(workdir):
            workdir = os.getcwd()
            logger.information('Default save directory is not set. Defaulting to the current working directory: ' + workdir)

        array_len = 4096                           # length of array in Fortran
        setup_prog.report('Checking X Range')
        CheckXrange(erange,'Energy')

        nbin,nrbin = nbins[0], nbins[1]

        logger.information('Sample is ' + self._samWS)
        logger.information('Resolution is ' + self._resWS)

        # Check for trailing and leading zeros in data
        setup_prog.report('Checking for leading and trailing zeros in the data')
        first_data_point, last_data_point = IndentifyDataBoundaries(self._samWS)
        if first_data_point > self._e_min:
            logger.warning("Sample workspace contains leading zeros within the energy range.")
            logger.warning("Updating eMin: eMin = " + str(first_data_point))
            self._e_min = first_data_point
        if last_data_point < self._e_max:
            logger.warning("Sample workspace contains trailing zeros within the energy range.")
            logger.warning("Updating eMax: eMax = " + str(last_data_point))
            self._e_max = last_data_point

        # update erange with new values
        erange = [self._e_min, self._e_max]

        setup_prog.report('Checking Analysers')
        CheckAnalysers(self._samWS,self._resWS)
        setup_prog.report('Obtaining EFixed, theta and Q')
        efix = getEfixed(self._samWS)
        theta, Q = GetThetaQ(self._samWS)

        nsam,ntc = CheckHistZero(self._samWS)

        totalNoSam = nsam

        #check if we're performing a sequential fit
        if not self._loop:
            nsam = 1

        nres = CheckHistZero(self._resWS)[0]

        setup_prog.report('Checking Histograms')
        if self._program == 'QL':
            if nres == 1:
                prog = 'QLr'                        # res file
            else:
                prog = 'QLd'                        # data file
                CheckHistSame(self._samWS,'Sample',self._resWS,'Resolution')
        elif self._program == 'QSe':
            if nres == 1:
                prog = 'QSe'                        # res file
            else:
                raise ValueError('Stretched Exp ONLY works with RES file')

        logger.information('Version is ' +prog)
        logger.information(' Number of spectra = '+str(nsam))
        logger.information(' Erange : '+str(erange[0])+' to '+str(erange[1]))

        setup_prog.report('Reading files')
        Wy,We = ReadWidthFile(self._width,self._wfile,totalNoSam)
        dtn,xsc = ReadNormFile(self._res_norm,self._resnormWS,totalNoSam)

        setup_prog.report('Establishing output workspace name')
        fname = self._samWS[:-4] + '_'+ prog
        probWS = fname + '_Prob'
        fitWS = fname + '_Fit'
        wrks = os.path.join(workdir, self._samWS[:-4])
        logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
        lwrk = len(wrks)
        # str.ljust returns a new string, so the padded result must be assigned
        wrks = wrks.ljust(140, ' ')
        wrkr = self._resWS.ljust(140, ' ')

        setup_prog.report('Initialising probability list')
        # initialise probability list
        if self._program == 'QL':
            prob0 = []
            prob1 = []
            prob2 = []
        xQ = np.array(Q[:nsam])
        # x values for the probability workspace: Q repeated once per spectrum
        xProb = np.tile(xQ, 3)
        eProb = np.zeros(3*nsam)

        group = ''
        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam*3)
        for m in range(0,nsam):
            logger.information('Group ' +str(m)+ ' at angle '+ str(theta[m]))
            nsp = m+1
            nout,bnorm,Xdat,Xv,Yv,Ev = CalcErange(self._samWS,m,erange,nbin)
            Ndat = nout[0]
            Imin = nout[1]
            Imax = nout[2]
            if prog == 'QLd':
                mm = m
            else:
                mm = 0
            Nb,Xb,Yb,Eb = GetXYE(self._resWS,mm,array_len)     # get resolution data
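            # numb packs the integer metadata for the Fortran kernels: spectra
            # counts (nsam, nsp, ntc), data sizes and fit window (Ndat, nbin,
            # Imin, Imax) and resolution sizes (Nb, nrbin)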
            numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
            rscl = 1.0
            reals = [efix, theta[m], rscl, bnorm]

            if prog == 'QLr':
                workflow_prog.report('Processing Sample number %i as Lorentzian' % nsp)
                nd,xout,yout,eout,yfit,yprob=QLr.qlres(numb,Xv,Yv,Ev,reals,fitOp,
                                                       Xdat,Xb,Yb,Wy,We,dtn,xsc,
                                                       wrks,wrkr,lwrk)
                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
                logger.information(message)
            if prog == 'QLd':
                workflow_prog.report('Processing Sample number %i' % nsp)
                nd,xout,yout,eout,yfit,yprob=QLd.qldata(numb,Xv,Yv,Ev,reals,fitOp,
                                                        Xdat,Xb,Yb,Eb,Wy,We,
                                                        wrks,wrkr,lwrk)
                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
                logger.information(message)
            if prog == 'QSe':
                workflow_prog.report('Processing Sample number %i as Stretched Exp' % nsp)
                nd,xout,yout,eout,yfit,yprob=Qse.qlstexp(numb,Xv,Yv,Ev,reals,fitOp,
                                                         Xdat,Xb,Yb,Wy,We,dtn,xsc,
                                                         wrks,wrkr,lwrk)
            dataX = xout[:nd]
            dataX = np.append(dataX,2*xout[nd-1]-xout[nd-2])
            yfit_list = np.split(yfit[:4*nd],4)
            dataF1 = yfit_list[1]
            if self._program == 'QL':
                dataF2 = yfit_list[2]
            workflow_prog.report('Processing data')
            dataG = np.zeros(nd)
            datX = dataX
            datY = yout[:nd]
            datE = eout[:nd]
            datX = np.append(datX,dataX)
            datY = np.append(datY,dataF1[:nd])
            datE = np.append(datE,dataG)
            res1 = dataF1[:nd] - yout[:nd]
            datX = np.append(datX,dataX)
            datY = np.append(datY,res1)
            datE = np.append(datE,dataG)
            nsp = 3
            names = 'data,fit.1,diff.1'
            res_plot = [0, 1, 2]
            if self._program == 'QL':
                workflow_prog.report('Processing Lorentzian result data')
                datX = np.append(datX,dataX)
                datY = np.append(datY,dataF2[:nd])
                datE = np.append(datE,dataG)
                res2 = dataF2[:nd] - yout[:nd]
                datX = np.append(datX,dataX)
                datY = np.append(datY,res2)
                datE = np.append(datE,dataG)
                nsp += 2
                names += ',fit.2,diff.2'
                res_plot.append(4)
                prob0.append(yprob[0])
                prob1.append(yprob[1])
                prob2.append(yprob[2])

            # create result workspace
            fitWS = fname+'_Workspaces'
            fout = fname+'_Workspace_'+ str(m)

            workflow_prog.report('Creating OutputWorkspace')
            CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY, DataE=datE,\
                Nspec=nsp, UnitX='DeltaE', VerticalAxisUnit='Text', VerticalAxisValues=names)

            # append workspace to list of results
            group += fout + ','

        comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
        comp_prog.report('Creating Group Workspace')
        GroupWorkspaces(InputWorkspaces=group,OutputWorkspace=fitWS)

        if self._program == 'QL':
            comp_prog.report('Processing Lorentzian probability data')
            yPr0 = np.array([prob0[0]])
            yPr1 = np.array([prob1[0]])
            yPr2 = np.array([prob2[0]])
            for m in range(1,nsam):
                yPr0 = np.append(yPr0,prob0[m])
                yPr1 = np.append(yPr1,prob1[m])
                yPr2 = np.append(yPr2,prob2[m])
            yProb = yPr0
            yProb = np.append(yProb,yPr1)
            yProb = np.append(yProb,yPr2)
            probWs = CreateWorkspace(OutputWorkspace=probWS, DataX=xProb, DataY=yProb, DataE=eProb,\
                Nspec=3, UnitX='MomentumTransfer')
            outWS = C2Fw(self._samWS[:-4],fname)
            if self._plot != 'None':
                QuasiPlot(fname,self._plot,res_plot,self._loop)
        if self._program == 'QSe':
            comp_prog.report('Running C2Se')
            outWS = C2Se(fname)
            if self._plot != 'None':
                QuasiPlot(fname,self._plot,res_plot,self._loop)

        log_prog = Progress(self, start=0.8, end =1.0, nreports=8)
        #Add some sample logs to the output workspaces
        log_prog.report('Copying Logs to outputWorkspace')
        CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
        log_prog.report('Adding Sample logs to Output workspace')
        QLAddSampleLogs(outWS, self._resWS, prog, self._background, self._elastic, erange,
                        (nbin, nrbin), self._resnormWS, self._wfile)
        log_prog.report('Copying logs to fit Workspace')
        CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
        log_prog.report('Adding sample logs to Fit workspace')
        QLAddSampleLogs(fitWS, self._resWS, prog, self._background, self._elastic, erange,
                        (nbin, nrbin), self._resnormWS, self._wfile)
        log_prog.report('Finalising log copying')

        if self._save:
            log_prog.report('Saving workspaces')
            fit_path = os.path.join(workdir,fitWS+'.nxs')
            SaveNexusProcessed(InputWorkspace=fitWS, Filename=fit_path)
            out_path = os.path.join(workdir, outWS+'.nxs')                    # path name for nxs file
            SaveNexusProcessed(InputWorkspace=outWS, Filename=out_path)
            logger.information('Output fit file created : ' + fit_path)
            logger.information('Output parameter file created : ' + out_path)

        self.setProperty('OutputWorkspaceFit', fitWS)
        self.setProperty('OutputWorkspaceResult', outWS)
        log_prog.report('Setting workspace properties')

        if self._program == 'QL':
            self.setProperty('OutputWorkspaceProb', probWS)
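A hedged invocation sketch: only the Program choices ('QL', 'QSe') and the three output properties are confirmed by the code above; the input property names used here are assumptions about how the algorithm is registered.

# Minimal sketch, assuming standard Mantid property names for the inputs
from mantid.simpleapi import BayesQuasi

BayesQuasi(Program='QL',
           SampleWorkspace='sample_red',      # hypothetical workspace names
           ResolutionWorkspace='sample_res',
           MinRange=-0.5, MaxRange=0.5,       # assumed energy window properties
           Elastic=True, Background='Flat',
           OutputWorkspaceFit='sample_QL_Fit',
           OutputWorkspaceResult='sample_QL_Result',
           OutputWorkspaceProb='sample_QL_Prob')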
Example #55
0
    def PyExec(self):
        scale = self.getProperty("Scale").value
        filenames = self.getProperty("Filename").value

        if not filenames:
            ipts = self.getProperty("IPTS").value
            exp = self.getProperty("Exp").value
            if self.getProperty("Exp").value == Property.EMPTY_INT:
                exp = int([e for e in os.listdir('/HFIR/HB2A/IPTS-{0}'.format(ipts)) if 'exp' in e][0].replace('exp',''))
            filenames = ['/HFIR/HB2A/IPTS-{0}/exp{1}/Datafiles/HB2A_exp{1:04}_scan{2:04}.dat'.format(ipts, exp, scan)
                         for scan in self.getProperty("ScanNumbers").value]

        metadata = None
        data = None

        # Read in data array and append all files
        for filename in filenames:
            # Read in all lines once
            with open(filename) as f:
                lines = f.readlines()

            if metadata is None:
                # Read metadata from the first file only; header lines have the
                # form '#key = value' and are parsed into a dict
                metadata = dict([np.char.strip(re.split('#(.*?)=(.*)', line, flags=re.U)[1:3])
                                 for line in lines if re.match('^#.*=', line)])
                # Get indir and exp from first file
                indir, data_filename = os.path.split(filename)
                _, exp, _ = data_filename.replace(".dat", "").split('_')

            # Find size of header, the size changes
            header = np.argmax([bool(re.match('(?!^#)', line)) for line in lines])-1
            if header < 0:
                raise RuntimeError("{} has no data in it".format(filename))
            names = lines[header].split()[1:]

            try:
                d = np.loadtxt(lines[header:], ndmin=1, dtype={'names': names, 'formats':[float]*len(names)})
            except (ValueError, IndexError):
                raise RuntimeError("Could not read {}, file likely malformed".format(filename))

            # Accumulate data
            data = d if data is None else np.append(data, d)

        # Get any masked detectors
        detector_mask = self.get_detector_mask(exp, indir)

        counts = np.array([data['anode{}'.format(n)] for n in range(1,45)])[detector_mask]
        twotheta = data['2theta']
        monitor = data['monitor']

        # Remove points with zero monitor count
        monitor_mask = np.nonzero(monitor)[0]
        if len(monitor_mask) == 0:
            raise RuntimeError("{} has all zero monitor counts".format(filename))
        monitor = monitor[monitor_mask]
        counts = counts[:, monitor_mask]
        twotheta = twotheta[monitor_mask]

        # Get either vcorr file or vanadium data
        vanadium_count, vanadium_monitor, vcorr = self.get_vanadium(detector_mask,
                                                                    data['m1'][0], data['colltrans'][0],
                                                                    exp, indir)

        def_x = self.getProperty("DefX").value
        if not def_x:
            def_x = metadata['def_x']

        if def_x not in data.dtype.names:
            logger.warning("Could not find {} property in datafile, using 2theta instead".format(def_x))
            def_x = '2theta'

        if def_x == '2theta':
            x = twotheta+self._gaps[:, np.newaxis][detector_mask]
            UnitX='Degrees'
        else:
            x = np.tile(data[def_x], (44,1))[detector_mask][:, monitor_mask]
            UnitX=def_x

        if self.getProperty("IndividualDetectors").value:
            # Separate spectrum per anode
            y, e = self.process(counts, scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            NSpec=len(x)
        else:
            if self.getProperty("BinData").value:
                # Data binned with bin
                x, y, e = self.process_binned(counts, x.ravel(), scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            else:
                y, e = self.process(counts, scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            NSpec=1

        createWS_alg = self.createChildAlgorithm("CreateWorkspace", enableLogging=False)
        createWS_alg.setProperty("DataX", x)
        createWS_alg.setProperty("DataY", y)
        createWS_alg.setProperty("DataE", e)
        createWS_alg.setProperty("NSpec", NSpec)
        createWS_alg.setProperty("UnitX", UnitX)
        createWS_alg.setProperty("YUnitLabel", "Counts")
        createWS_alg.setProperty("WorkspaceTitle", str(metadata['scan_title']))
        createWS_alg.execute()
        outWS = createWS_alg.getProperty("OutputWorkspace").value

        self.setProperty("OutputWorkspace", outWS)

        self.add_metadata(outWS, metadata, data)
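A hedged usage sketch for this loader: the algorithm name HB2AReduce is an assumption based on the HB2A file paths above, while the property names (Filename, IPTS, Exp, ScanNumbers, Scale, DefX, IndividualDetectors, BinData, OutputWorkspace) are the ones read in PyExec.

# Minimal sketch, assuming the algorithm is registered as HB2AReduce
from mantid.simpleapi import HB2AReduce

ws = HB2AReduce(IPTS=1234, Exp=5, ScanNumbers=[10, 11],  # hypothetical identifiers
                BinData=True,
                OutputWorkspace='hb2a_scan')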
Example #56
0
    def validate(self):
        # validate normalization
        if os.path.isfile(self.vanadium_SA_file) and os.path.isfile(self.vanadium_flux_file):
            try:
                Load(self.vanadium_SA_file, OutputWorkspace='autoreduction_sa')
                Load(self.vanadium_flux_file, OutputWorkspace='autoreduction_flux')
                self.can_do_norm = True
            except Exception:
                logger.warning("Could not load normalization vanadium")
                self.can_do_norm = False
        # if ub_matrix_file is not given, use the newest *.mat in the IPTS shared directory
        if self.ub_matrix_file == '':
            mat_list = []
            for root, dirs, files in os.walk(os.path.abspath(os.path.join(sys.argv[2], ".."))):  # Look in IPTS shared
                for f in files:
                    if f.endswith(".mat"):
                        mat_list.append(os.path.join(root, f))
            if len(mat_list) == 0:
                self.ub_matrix_file = ''
            else:
                self.ub_matrix_file = max(mat_list, key=os.path.getctime)
        # validate UB
        if os.path.isfile(self.ub_matrix_file):
            try:
                autoreduction_ub = CreateSingleValuedWorkspace(0.)
                LoadIsawUB(InputWorkspace=autoreduction_ub, Filename=self.ub_matrix_file)
                if autoreduction_ub.sample().hasOrientedLattice():
                    self.can_do_HKL = True
                else:
                    self.can_do_HKL = False
                    logger.warning("Could not load UB")
            except Exception:
                self.can_do_HKL = False
                logger.warning("Could not load UB")
        # validate mask
        self.good_mask = False
        lenMask = len(self.mask)
        if lenMask > 0:
            self.good_mask = True
            for i in range(lenMask):
                dicti = self.mask[i]
                # each entry must be a dict whose keys are a subset of Bank/Tube/Pixel
                if not isinstance(dicti, dict) or not set(dicti.keys()).issubset({'Bank', 'Tube', 'Pixel'}):
                    self.good_mask = False
        if not self.good_mask:
            logger.warning("BTP mask is missing or invalid. It will be ignored")
        # validate plotting options
        for pl in self.plot_requests:
            if not isinstance(pl, dict):
                logger.warning("This is not a dict: " + str(pl))
                continue
            if set(pl.keys()) != set(['PerpendicularTo', 'Minimum', 'Maximum']):
                logger.warning("Missing or invalid keys: " + str(pl.keys()))
                continue
            if pl['PerpendicularTo'] not in ['Q_sample_x', 'Q_sample_y', 'Q_sample_z', '[H,0,0]', '[0,K,0]', '[0,0,L]']:
                logger.warning("Could not find this direction: " + str(pl['PerpendicularTo']))
                continue
            if not self.can_do_HKL and pl['PerpendicularTo'] in ['[H,0,0]', '[0,K,0]', '[0,0,L]']:
                logger.warning("Will not be able to convert to HKL")
                continue
            if self.can_do_HKL and pl['PerpendicularTo'] not in ['[H,0,0]', '[0,K,0]', '[0,0,L]']:
                logger.warning("Data will be in HKL - picture not created")
                continue
            self.plots.append(pl)
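Per the checks above, a valid plotting request is a dict with exactly the keys PerpendicularTo, Minimum and Maximum; for example (hypothetical values):

plot_request = {'PerpendicularTo': '[H,0,0]', 'Minimum': -0.05, 'Maximum': 0.05}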