Example #1
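Running Mantid's `Fit` algorithm simultaneously over several input workspaces: after the fit, the logs from each input workspace are copied onto the matching member of the output group workspace (or onto the single output workspace when only one input was supplied).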
    def do_simultaneous_fit_and_return_workspace_parameters_and_fit_function(
            self, parameters_dict):
        alg = mantid.AlgorithmManager.create("Fit")

        output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix \
            = run_simultaneous_Fit(parameters_dict, alg)
        if len(parameters_dict['InputWorkspace']) > 1:
            for input_workspace, output in zip(parameters_dict['InputWorkspace'],
                                               mantid.api.AnalysisDataService.retrieve(output_workspace).getNames()):
                CopyLogs(InputWorkspace=input_workspace, OutputWorkspace=output, StoreInADS=False)
        else:
            CopyLogs(InputWorkspace=parameters_dict['InputWorkspace'][0], OutputWorkspace=output_workspace,
                     StoreInADS=False)
        return output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix
Example #2
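The single-fit counterpart of Example #1: one input workspace, one output workspace, and a single `CopyLogs` call.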
    def do_single_fit_and_return_workspace_parameters_and_fit_function(
            self, parameters_dict):
        alg = mantid.AlgorithmManager.create("Fit")
        output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix = run_Fit(
            parameters_dict, alg)
        CopyLogs(InputWorkspace=parameters_dict['InputWorkspace'], OutputWorkspace=output_workspace, StoreInADS=False)
        return output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix
Example #3
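A small dispatcher: for a single dataset it copies logs directly, otherwise it defers to a multi-dataset helper (shown in Example #5).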
    def _copy_logs(self, input_workspaces, output_workspace: str) -> None:
        """Copy the logs from the input workspace(s) to the output workspaces."""
        if self.fitting_context.number_of_datasets == 1:
            CopyLogs(InputWorkspace=input_workspaces[0],
                     OutputWorkspace=output_workspace,
                     StoreInADS=False)
        else:
            self._copy_logs_for_all_datsets(input_workspaces, output_workspace)
Example #4
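A simultaneous TF asymmetry fit using `CalculateMuonAsymmetry`. The log-copying logic mirrors Example #1, keyed on the `ReNormalizedWorkspaceList` parameter instead of `InputWorkspace`.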
    def do_simultaneous_tf_fit(self, parameter_dict, global_parameters):
        alg = mantid.AlgorithmManager.create("CalculateMuonAsymmetry")
        output_workspace, fitting_parameters_table, function_object, output_status, output_chi_squared, covariance_matrix = \
            run_CalculateMuonAsymmetry(parameter_dict, alg)
        if len(parameter_dict['ReNormalizedWorkspaceList']) > 1:
            for input_workspace, output in zip(parameter_dict['ReNormalizedWorkspaceList'],
                                               mantid.api.AnalysisDataService.retrieve(output_workspace).getNames()):
                CopyLogs(InputWorkspace=input_workspace, OutputWorkspace=output, StoreInADS=False)
        else:
            CopyLogs(InputWorkspace=parameter_dict['ReNormalizedWorkspaceList'][0], OutputWorkspace=output_workspace,
                     StoreInADS=False)

        self._handle_simultaneous_fit_results(parameter_dict['ReNormalizedWorkspaceList'], function_object,
                                              fitting_parameters_table, output_workspace, global_parameters,
                                              covariance_matrix)

        return function_object, output_status, output_chi_squared
Example #5
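The multi-dataset helper dispatched to in Example #3: each input workspace is paired with the corresponding member of the output group workspace and its logs are copied across.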
    def _copy_logs_for_all_datsets(self, input_workspaces: list,
                                   output_group: str) -> None:
        """Copy the logs from the input workspaces to the output workspaces."""
        for input_workspace, output in zip(
                input_workspaces,
                self._get_names_in_group_workspace(output_group)):
            CopyLogs(InputWorkspace=input_workspace,
                     OutputWorkspace=output,
                     StoreInADS=False)
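Example #5 depends on a `_get_names_in_group_workspace` helper that is not shown here. Judging from how Example #1 enumerates group members through `AnalysisDataService`, a plausible sketch is given below; treat it as an assumption rather than the verified implementation.

    def _get_names_in_group_workspace(self, group_name: str) -> list:
        """Hypothetical sketch: list the member names of a workspace group,
        mirroring the AnalysisDataService.retrieve(...).getNames() call in
        Example #1; assumes `from mantid.api import AnalysisDataService`.
        The real helper may differ."""
        if AnalysisDataService.doesExist(group_name):
            return list(AnalysisDataService.retrieve(group_name).getNames())
        return []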
Example #6
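The single-fit TF asymmetry counterpart of Example #4: one renormalised workspace, one output workspace, one `CopyLogs` call.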
    def do_single_tf_fit(self, parameter_dict):
        alg = mantid.AlgorithmManager.create("CalculateMuonAsymmetry")
        output_workspace, fitting_parameters_table, function_object, output_status, output_chi_squared, covariance_matrix = \
            run_CalculateMuonAsymmetry(parameter_dict, alg)
        CopyLogs(InputWorkspace=parameter_dict['ReNormalizedWorkspaceList'], OutputWorkspace=output_workspace,
                 StoreInADS=False)
        self._handle_single_fit_results(parameter_dict['ReNormalizedWorkspaceList'], function_object,
                                        fitting_parameters_table, output_workspace, covariance_matrix)

        return function_object, output_status, output_chi_squared
Example #7
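A variant of Example #2 that creates a double-pulse fitting algorithm when `DoublePulseEnabled` is set in the GUI context; the log copying itself is unchanged.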
    def do_single_fit_and_return_workspace_parameters_and_fit_function(
            self, parameters_dict):
        if 'DoublePulseEnabled' in self.context.gui_context and self.context.gui_context[
                'DoublePulseEnabled']:
            alg = self._create_double_pulse_alg()
        else:
            alg = mantid.AlgorithmManager.create("Fit")

        output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix = run_Fit(
            parameters_dict, alg)
        CopyLogs(InputWorkspace=parameters_dict['InputWorkspace'],
                 OutputWorkspace=output_workspace,
                 StoreInADS=False)
        return output_workspace, output_parameters, function_object, output_status, output_chi, covariance_matrix
Example #8
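A chunked file-reduction routine: each file is loaded in chunks, optionally served from a cache file, filtered for bad pulses, corrected for absorption, aligned and focused, and accumulated into a single workspace. Here `CopyLogs` with `MergeStrategy='WipeExisting'` restores the accumulated logs onto chunks that were loaded with log-loading skipped.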
    def __processFile(self, filename, file_prog_start,
                      determineCharacterizations,
                      createUnfocused):  # noqa: C902,C901
        # create a unique name for the workspace
        wkspname = '__' + self.__wkspNameFromFile(filename)
        wkspname += '_f%d' % self._filenames.index(
            filename)  # add file number to be unique
        unfocusname = ''
        if createUnfocused:
            unfocusname = wkspname + '_unfocused'

        # check for a cachefilename
        cachefile = self.__getCacheName(self.__wkspNameFromFile(filename))
        self.log().information('looking for cachefile "{}"'.format(cachefile))
        if not createUnfocused and self.useCaching and os.path.exists(cachefile):
            try:
                if self.__loadCacheFile(cachefile, wkspname):
                    return wkspname, ''
            except RuntimeError as e:
                # log as a warning and carry on as though the cache file didn't exist
                self.log().warning('Failed to load cache file "{}": {}'.format(
                    cachefile, e))
        else:
            self.log().information('not using cache')

        chunks = determineChunking(filename, self.chunkSize)
        numSteps = 6  # for better progress reporting - 6 steps per chunk
        if createUnfocused:
            numSteps = 7  # one more for accumulating the unfocused workspace
        self.log().information('Processing \'{}\' in {:d} chunks'.format(
            filename, len(chunks)))
        prog_per_chunk_step = self.prog_per_file * 1. / (numSteps *
                                                         float(len(chunks)))

        unfocusname_chunk = ''
        canSkipLoadingLogs = False

        # inner loop is over chunks
        haveAccumulationForFile = False
        for (j, chunk) in enumerate(chunks):
            prog_start = file_prog_start + float(j) * float(
                numSteps - 1) * prog_per_chunk_step

            # if reading all at once, put the data into the final name directly
            if len(chunks) == 1:
                chunkname = wkspname
                unfocusname_chunk = unfocusname
            else:
                chunkname = '{}_c{:d}'.format(wkspname, j)
                if unfocusname:  # only create unfocus chunk if needed
                    unfocusname_chunk = '{}_c{:d}'.format(unfocusname, j)

            # load a chunk - this is a bit crazy long because we need to get an output property from `Load` when it
            # is run and the algorithm history doesn't exist until the parent algorithm (this) has finished
            loader = self.__createLoader(
                filename,
                chunkname,
                skipLoadingLogs=(len(chunks) > 1 and canSkipLoadingLogs
                                 and haveAccumulationForFile),
                progstart=prog_start,
                progstop=prog_start + prog_per_chunk_step,
                **chunk)
            loader.execute()
            if j == 0:
                self.__setupCalibration(chunkname)

            # copy the necessary logs onto the workspace
            if len(chunks) > 1 and canSkipLoadingLogs and haveAccumulationForFile:
                CopyLogs(InputWorkspace=wkspname,
                         OutputWorkspace=chunkname,
                         MergeStrategy='WipeExisting')
                # re-load instrument so detector positions that depend on logs get initialized
                try:
                    LoadIDFFromNexus(Workspace=chunkname,
                                     Filename=filename,
                                     InstrumentParentPath='/entry')
                except RuntimeError as e:
                    self.log().warning(
                        'Reloading instrument using "LoadIDFFromNexus" failed: {}'
                        .format(e))

            # get the underlying loader name if we used the generic one
            if self.__loaderName == 'Load':
                self.__loaderName = loader.getPropertyValue('LoaderName')
            # only LoadEventNexus can turn off loading logs, but FilterBadPulses
            # requires them to be loaded from the file
            canSkipLoadingLogs = self.__loaderName == 'LoadEventNexus' and self.filterBadPulses <= 0. and haveAccumulationForFile

            if determineCharacterizations and j == 0:
                self.__determineCharacterizations(
                    filename, chunkname)  # updates instance variable
                determineCharacterizations = False

            if self.__loaderName == 'LoadEventNexus' and mtd[chunkname].getNumberEvents() == 0:
                self.log().notice(
                    'Chunk {} of {} contained no events. Skipping to next chunk.'
                    .format(j + 1, len(chunks)))
                continue

            prog_start += prog_per_chunk_step
            if self.filterBadPulses > 0.:
                FilterBadPulses(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                LowerCutoff=self.filterBadPulses,
                                startProgress=prog_start,
                                endProgress=prog_start + prog_per_chunk_step)
                if mtd[chunkname].getNumberEvents() == 0:
                    msg = 'FilterBadPulses removed all events from '
                    if len(chunks) == 1:
                        raise RuntimeError(msg + filename)
                    else:
                        raise RuntimeError(msg + 'chunk {} of {} in {}'.format(
                            j, len(chunks), filename))

            prog_start += prog_per_chunk_step

            # absorption correction workspace
            if self.absorption is not None and len(str(self.absorption)) > 0:
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='Wavelength',
                             EMode='Elastic')
                # rebin the absorption correction to match the binning of the inputs if in histogram mode
                # EventWorkspace will compare the wavelength of each individual event
                absWksp = self.absorption
                if mtd[chunkname].id() != 'EventWorkspace':
                    absWksp = '__absWkspRebinned'
                    RebinToWorkspace(WorkspaceToRebin=self.absorption,
                                     WorkspaceToMatch=chunkname,
                                     OutputWorkspace=absWksp)
                Divide(LHSWorkspace=chunkname,
                       RHSWorkspace=absWksp,
                       OutputWorkspace=chunkname,
                       startProgress=prog_start,
                       endProgress=prog_start + prog_per_chunk_step)
                if absWksp != self.absorption:  # clean up
                    DeleteWorkspace(Workspace=absWksp)
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='TOF',
                             EMode='Elastic')
            prog_start += prog_per_chunk_step

            if self.kwargs is None:
                raise RuntimeError(
                    'Somehow arguments for "AlignAndFocusPowder" aren\'t set')

            AlignAndFocusPowder(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                UnfocussedWorkspace=unfocusname_chunk,
                                startProgress=prog_start,
                                endProgress=prog_start +
                                2. * prog_per_chunk_step,
                                **self.kwargs)
            prog_start += 2. * prog_per_chunk_step  # AlignAndFocusPowder counts for two steps

            self.__accumulate(chunkname,
                              wkspname,
                              unfocusname_chunk,
                              unfocusname,
                              not haveAccumulationForFile,
                              removelogs=canSkipLoadingLogs)

            haveAccumulationForFile = True
        # end of inner loop
        if not mtd.doesExist(wkspname):
            raise RuntimeError(
                'Failed to process any data from file "{}"'.format(filename))

        # copy the sample object from the absorption workspace
        if self.absorption is not None and len(str(self.absorption)) > 0:
            CopySample(InputWorkspace=self.absorption,
                       OutputWorkspace=wkspname,
                       CopyEnvironment=False)

        # write out the cachefile for the main reduced data independent of whether
        # the unfocussed workspace was requested
        if self.useCaching and not os.path.exists(cachefile):
            self.log().information(
                'Saving data to cachefile "{}"'.format(cachefile))
            SaveNexusProcessed(InputWorkspace=wkspname, Filename=cachefile)

        return wkspname, unfocusname
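Note the interplay between `canSkipLoadingLogs` and `CopyLogs` above: once a first chunk has been accumulated, subsequent `LoadEventNexus` chunks can skip reading logs from disk; the accumulated workspace's logs are copied onto each new chunk with `MergeStrategy='WipeExisting'`, and the instrument definition is re-loaded so that detector positions that depend on logs stay correct.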
Example #9
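An earlier, simpler variant of the chunked loop in Example #8. The same `CopyLogs` pattern restores logs onto chunks loaded with `LoadLogs=False`, and `RemoveLogs` strips them from later chunks before `Plus` accumulates them, since the accumulated workspace already has the logs.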
    def __processFile(self, filename, wkspname, unfocusname, file_prog_start,
                      determineCharacterizations):
        chunks = determineChunking(filename, self.chunkSize)
        numSteps = 6  # for better progress reporting - 6 steps per chunk
        if unfocusname != '':
            numSteps = 7  # one more for accumulating the unfocused workspace
        self.log().information('Processing \'{}\' in {:d} chunks'.format(
            filename, len(chunks)))
        prog_per_chunk_step = self.prog_per_file * 1. / (numSteps *
                                                         float(len(chunks)))
        unfocusname_chunk = ''
        canSkipLoadingLogs = False

        # inner loop is over chunks
        for (j, chunk) in enumerate(chunks):
            prog_start = file_prog_start + float(j) * float(
                numSteps - 1) * prog_per_chunk_step
            chunkname = '{}_c{:d}'.format(wkspname, j)
            if unfocusname != '':  # only create unfocus chunk if needed
                unfocusname_chunk = '{}_c{:d}'.format(unfocusname, j)

            # load a chunk - this is a bit crazy long because we need to get an output property from `Load` when it
            # is run and the algorithm history doesn't exist until the parent algorithm (this) has finished
            loader = self.__createLoader(filename,
                                         chunkname,
                                         progstart=prog_start,
                                         progstop=prog_start +
                                         prog_per_chunk_step)
            if canSkipLoadingLogs:
                loader.setProperty('LoadLogs', False)
            for key, value in chunk.items():
                if isinstance(value, str):
                    loader.setPropertyValue(key, value)
                else:
                    loader.setProperty(key, value)
            loader.execute()

            # copy the necessary logs onto the workspace
            if canSkipLoadingLogs:
                CopyLogs(InputWorkspace=wkspname,
                         OutputWorkspace=chunkname,
                         MergeStrategy='WipeExisting')

            # get the underlying loader name if we used the generic one
            if self.__loaderName == 'Load':
                self.__loaderName = loader.getPropertyValue('LoaderName')
            canSkipLoadingLogs = self.__loaderName == 'LoadEventNexus'

            if determineCharacterizations and j == 0:
                self.__determineCharacterizations(
                    filename, chunkname)  # updates instance variable
                determineCharacterizations = False

            prog_start += prog_per_chunk_step
            if self.filterBadPulses > 0.:
                FilterBadPulses(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                LowerCutoff=self.filterBadPulses,
                                startProgress=prog_start,
                                endProgress=prog_start + prog_per_chunk_step)
            prog_start += prog_per_chunk_step

            # absorption correction workspace
            if self.absorption is not None and len(str(self.absorption)) > 0:
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='Wavelength',
                             EMode='Elastic')
                Divide(LHSWorkspace=chunkname,
                       RHSWorkspace=self.absorption,
                       OutputWorkspace=chunkname,
                       startProgress=prog_start,
                       endProgress=prog_start + prog_per_chunk_step)
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='TOF',
                             EMode='Elastic')
            prog_start += prog_per_chunk_step

            AlignAndFocusPowder(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                UnfocussedWorkspace=unfocusname_chunk,
                                startProgress=prog_start,
                                endProgress=prog_start +
                                2. * prog_per_chunk_step,
                                **self.kwargs)
            prog_start += 2. * prog_per_chunk_step  # AlignAndFocusPowder counts for two steps

            if j == 0:
                self.__updateAlignAndFocusArgs(chunkname)
                RenameWorkspace(InputWorkspace=chunkname,
                                OutputWorkspace=wkspname)
                if unfocusname != '':
                    RenameWorkspace(InputWorkspace=unfocusname_chunk,
                                    OutputWorkspace=unfocusname)
            else:
                RemoveLogs(Workspace=chunkname)  # accumulation has them already
                Plus(LHSWorkspace=wkspname,
                     RHSWorkspace=chunkname,
                     OutputWorkspace=wkspname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                     startProgress=prog_start,
                     endProgress=prog_start + prog_per_chunk_step)
                DeleteWorkspace(Workspace=chunkname)

                if unfocusname != '':
                    RemoveLogs(Workspace=unfocusname_chunk)  # accumulation has them already
                    Plus(LHSWorkspace=unfocusname,
                         RHSWorkspace=unfocusname_chunk,
                         OutputWorkspace=unfocusname,
                         ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                         startProgress=prog_start,
                         endProgress=prog_start + prog_per_chunk_step)
                    DeleteWorkspace(Workspace=unfocusname_chunk)

                if self.kwargs['PreserveEvents'] and self.kwargs['CompressTolerance'] > 0.:
                    CompressEvents(InputWorkspace=wkspname,
                                   OutputWorkspace=wkspname,
                                   WallClockTolerance=self.kwargs['CompressWallClockTolerance'],
                                   Tolerance=self.kwargs['CompressTolerance'],
                                   StartTime=self.kwargs['CompressStartTime'])
Example #10
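The `PyExec` of a symmetrisation algorithm: because the output workspace is built from scratch with `CreateWorkspace`, `CopyLogs` and `CopyInstrumentParameters` carry the sample's logs and instrument parameters over before the symmetrised spectra are written in.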
    def PyExec(self):
        from IndirectCommon import StartTime, EndTime

        StartTime('Symmetrise')
        self._setup()
        temp_ws_name = '__symm_temp'

        # The number of spectra that will actually be changed
        num_symm_spectra = self._spectra_range[1] - self._spectra_range[0] + 1

        # Find the smallest data array in the first spectra
        len_x = len(mtd[self._sample].readX(0))
        len_y = len(mtd[self._sample].readY(0))
        len_e = len(mtd[self._sample].readE(0))
        sample_array_len = min(len_x, len_y, len_e)

        sample_x = mtd[self._sample].readX(0)

        # Get slice bounds of array
        try:
            self._calculate_array_points(sample_x, sample_array_len)
        except Exception as e:
            raise RuntimeError('Failed to calculate array slice boundaries: %s' % str(e))

        max_sample_index = sample_array_len - 1
        centre_range_len = self._positive_min_index + self._negative_min_index
        positive_diff_range_len = max_sample_index - self._positive_max_index

        output_cut_index = max_sample_index - self._positive_min_index - positive_diff_range_len - 1
        new_array_len = 2 * max_sample_index - centre_range_len - 2 * positive_diff_range_len - 1

        if self._verbose:
            logger.notice('Sample array length = %d' % sample_array_len)

            logger.notice('Positive X min at i=%d, x=%f'
                          % (self._positive_min_index, sample_x[self._positive_min_index]))
            logger.notice('Negative X min at i=%d, x=%f'
                          % (self._negative_min_index, sample_x[self._negative_min_index]))

            logger.notice('Positive X max at i=%d, x=%f'
                          % (self._positive_max_index, sample_x[self._positive_max_index]))

            logger.notice('New array length = %d' % new_array_len)
            logger.notice('Output array LR split index = %d' % output_cut_index)

        x_unit = mtd[self._sample].getAxis(0).getUnit().unitID()
        v_unit = mtd[self._sample].getAxis(1).getUnit().unitID()
        v_axis_data = mtd[self._sample].getAxis(1).extractValues()

        # Take the values we need from the original vertical axis
        min_spectrum_index = mtd[self._sample].getIndexFromSpectrumNumber(int(self._spectra_range[0]))
        max_spectrum_index = mtd[self._sample].getIndexFromSpectrumNumber(int(self._spectra_range[1]))
        new_v_axis_data = v_axis_data[min_spectrum_index:max_spectrum_index + 1]

        # Create an empty workspace with enough storage for the new data
        zeros = np.zeros(new_array_len * num_symm_spectra)
        CreateWorkspace(OutputWorkspace=temp_ws_name,
                        DataX=zeros, DataY=zeros, DataE=zeros,
                        NSpec=int(num_symm_spectra),
                        VerticalAxisUnit=v_unit, VerticalAxisValues=new_v_axis_data,
                        UnitX=x_unit)

        # Copy logs and properties from sample workspace
        CopyLogs(InputWorkspace=self._sample, OutputWorkspace=temp_ws_name)
        CopyInstrumentParameters(InputWorkspace=self._sample, OutputWorkspace=temp_ws_name)

        # For each spectrum copy positive values to the negative
        output_spectrum_index = 0
        for spectrum_no in range(self._spectra_range[0], self._spectra_range[1] + 1):
            # Get index of original spectra
            spectrum_index = mtd[self._sample].getIndexFromSpectrumNumber(spectrum_no)

            # Strip any additional array cells
            x_in = mtd[self._sample].readX(spectrum_index)[:sample_array_len]
            y_in = mtd[self._sample].readY(spectrum_index)[:sample_array_len]
            e_in = mtd[self._sample].readE(spectrum_index)[:sample_array_len]

            # Get some zeroed data to overwrite with copies from sample
            x_out = np.zeros(new_array_len)
            y_out = np.zeros(new_array_len)
            e_out = np.zeros(new_array_len)

            # Left hand side (reflected)
            x_out[:output_cut_index] = -x_in[self._positive_max_index - 1:self._positive_min_index:-1]
            y_out[:output_cut_index] = y_in[self._positive_max_index - 1:self._positive_min_index:-1]
            e_out[:output_cut_index] = e_in[self._positive_max_index - 1:self._positive_min_index:-1]

            # Right hand side (copied)
            x_out[output_cut_index:] = x_in[self._negative_min_index:self._positive_max_index]
            y_out[output_cut_index:] = y_in[self._negative_min_index:self._positive_max_index]
            e_out[output_cut_index:] = e_in[self._negative_min_index:self._positive_max_index]

            # Set output spectrum data
            mtd[temp_ws_name].setX(output_spectrum_index, x_out)
            mtd[temp_ws_name].setY(output_spectrum_index, y_out)
            mtd[temp_ws_name].setE(output_spectrum_index, e_out)

            # Set output spectrum number
            mtd[temp_ws_name].getSpectrum(output_spectrum_index).setSpectrumNo(spectrum_no)
            output_spectrum_index += 1

            logger.information('Symmetrise spectrum %d' % spectrum_no)

        RenameWorkspace(InputWorkspace=temp_ws_name, OutputWorkspace=self._output_workspace)

        if self._save:
            self._save_output()

        if self._plot:
            self._plot_output()

        if self._props_output_workspace != '':
            self._generate_props_table()

        self.setProperty('OutputWorkspace', self._output_workspace)

        EndTime('Symmetrise')
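Taken together, every example follows the same pattern: derive an output workspace, then pull the sample logs across from the input so that metadata survives the processing. A minimal, self-contained sketch of that pattern is below; the workspace names are invented for illustration.

    from mantid.simpleapi import CopyLogs, CreateSampleWorkspace, CreateWorkspace

    # Hypothetical workspaces, for illustration only.
    source = CreateSampleWorkspace(OutputWorkspace='__source')  # stand-in for measured data with logs
    target = CreateWorkspace(DataX=[0., 1.], DataY=[1.], OutputWorkspace='__target')

    # Replace target's logs with source's, as in Examples #8 and #9.
    CopyLogs(InputWorkspace=source, OutputWorkspace=target, MergeStrategy='WipeExisting')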