Example #1
def __accumulate(self, chunkname, sumname, chunkunfocusname, sumunfocusname, firstrun, removelogs=False):
        """accumulate new data in `chunkname` into the sum `sumname` and delete `chunkname`"""
        # the first call to accumulate into a specific target should be a simple rename
        self.log().debug('__accumulate({}, {}, {}, {}, {})'.format(chunkname, sumname, chunkunfocusname,
                                                                   sumunfocusname, firstrun))
        if chunkname == sumname:
            return  # there is nothing to be done

        if not firstrun:
            # if the sum workspace doesn't already exist, just rename
            if not mtd.doesExist(sumname):
                firstrun = True

        if firstrun:
            if chunkname != sumname:
                RenameWorkspace(InputWorkspace=chunkname, OutputWorkspace=sumname)
            if chunkunfocusname and chunkunfocusname != sumunfocusname:
                RenameWorkspace(InputWorkspace=chunkunfocusname, OutputWorkspace=sumunfocusname)
        else:
            if removelogs:
                RemoveLogs(Workspace=chunkname)  # accumulation has them already
            RebinToWorkspace(WorkspaceToRebin=chunkname, WorkspaceToMatch=sumname,
                             OutputWorkspace=chunkname)
            Plus(LHSWorkspace=sumname, RHSWorkspace=chunkname, OutputWorkspace=sumname,
                 ClearRHSWorkspace=self.kwargs['PreserveEvents'])
            DeleteWorkspace(Workspace=chunkname)
            self.__compressEvents(sumname)  # could be smarter about when to run

            if chunkunfocusname and chunkunfocusname != sumunfocusname:
                if removelogs:
                    RemoveLogs(Workspace=chunkunfocusname)  # accumulation has them already
                Plus(LHSWorkspace=sumunfocusname, RHSWorkspace=chunkunfocusname, OutputWorkspace=sumunfocusname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'])
                DeleteWorkspace(Workspace=chunkunfocusname)
                self.__compressEvents(sumunfocusname)  # could be smarter about when to run
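The rename-then-add idiom above recurs throughout these examples: the first chunk is renamed into the accumulation target, and every later chunk is added with Plus and deleted. A minimal standalone sketch, assuming Mantid's simpleapi is available; the workspace names and the use of CreateSampleWorkspace as a stand-in for real chunks are illustrative only:

from mantid.simpleapi import CreateSampleWorkspace, DeleteWorkspace, Plus, RenameWorkspace, mtd

def accumulate(chunkname, sumname):
    # first call: a simple rename; later calls: add and discard the chunk
    if not mtd.doesExist(sumname):
        RenameWorkspace(InputWorkspace=chunkname, OutputWorkspace=sumname)
    else:
        Plus(LHSWorkspace=sumname, RHSWorkspace=chunkname, OutputWorkspace=sumname)
        DeleteWorkspace(Workspace=chunkname)

for _ in range(3):
    CreateSampleWorkspace(OutputWorkspace='chunk')  # stand-in for loading one chunk
    accumulate('chunk', 'total')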
Example #2
    def setUp(self):
        config['default.facility'] = 'ILL'
        config['default.instrument'] = 'D11'
        config['logging.loggers.root.level'] = 'Warning'
        config.appendDataSearchSubDir('ILL/D11/')
        # prepare mask for instrument edges first:
        MaskBTP(Instrument='D11', Tube='0-6,250-256')
        RenameWorkspace(InputWorkspace='D11MaskBTP',
                        OutputWorkspace='mask_vertical')
        MaskBTP(Instrument='D11', Pixel='0-6,250-256')
        Plus(LHSWorkspace='mask_vertical',
             RHSWorkspace='D11MaskBTP',
             OutputWorkspace='edge_masks')
        # the edges mask can be used as a default mask for all distances and wavelengths

        MaskBTP(Instrument='D11', Tube='114-142', Pixel='114-142')
        RenameWorkspace(InputWorkspace='D11MaskBTP',
                        OutputWorkspace='mask_8m_4_6A_center')
        MaskBTP(Instrument='D11', Tube='3-14', Pixel='240-256')
        Plus(LHSWorkspace='D11MaskBTP',
             RHSWorkspace='mask_8m_4_6A_center',
             OutputWorkspace='mask_8m_4_6A')
        MaskBTP(Instrument='D11', Tube='103-147', Pixel='103-147')
        RenameWorkspace(InputWorkspace='D11MaskBTP',
                        OutputWorkspace='mask_1m_4_6A_center')
        MaskBTP(Instrument='D11', Tube='3-14', Pixel='240-256')
        Plus(LHSWorkspace='D11MaskBTP',
             RHSWorkspace='mask_1m_4_6A_center',
             OutputWorkspace='mask_1m_4_6A')
Example #3
    def setUp(self):
        config['default.facility'] = 'ILL'
        config['default.instrument'] = 'D22'
        config['logging.loggers.root.level'] = 'Warning'
        config.appendDataSearchSubDir('ILL/D22/')

        MaskBTP(Instrument='D22', Pixel='0-12,245-255')
        RenameWorkspace(InputWorkspace='D22MaskBTP', OutputWorkspace='top_bottom')
        MaskBTP(Instrument='D22', Tube='10-31', Pixel='105-150')
        Plus(LHSWorkspace='top_bottom', RHSWorkspace='D22MaskBTP',
             OutputWorkspace='D22_mask_offset')
        MaskBTP(Instrument='D22', Tube='54-75', Pixel='108-150')
        Plus(LHSWorkspace='top_bottom', RHSWorkspace='D22MaskBTP',
             OutputWorkspace='D22_mask_central')
Example #4
    def _load_and_sum_runs(self, spectra):
        """Load the input set of runs & sum them if there
        is more than one.
            @param spectra :: The list of spectra to load
            @returns a tuple of length 2 containing (main_detector_ws, monitor_ws)
        """
        isis = config.getFacility("ISIS")
        inst_prefix = isis.instrument("VESUVIO").shortName()

        runs = self._get_runs()

        self.summed_ws, self.summed_mon = SUMMED_WS, SUMMED_MON  # module-level names of the accumulated workspaces
        for index, run in enumerate(runs):
            run = inst_prefix + str(run)
            if index == 0:
                out_name, out_mon = SUMMED_WS, SUMMED_MON
            else:
                out_name, out_mon = SUMMED_WS + 'tmp', SUMMED_MON + 'tmp'
            # Load data
            LoadRaw(Filename=run,
                    SpectrumList=spectra,
                    OutputWorkspace=out_name,
                    LoadMonitors='Exclude',
                    EnableLogging=_LOGGING_)
            LoadRaw(Filename=run,
                    SpectrumList=self._mon_spectra,
                    OutputWorkspace=out_mon,
                    EnableLogging=_LOGGING_)
            if index > 0:  # sum
                Plus(LHSWorkspace=SUMMED_WS,
                     RHSWorkspace=out_name,
                     OutputWorkspace=SUMMED_WS,
                     EnableLogging=_LOGGING_)
                Plus(LHSWorkspace=SUMMED_MON,
                     RHSWorkspace=out_mon,
                     OutputWorkspace=SUMMED_MON,
                     EnableLogging=_LOGGING_)
                DeleteWorkspace(out_name, EnableLogging=_LOGGING_)
                DeleteWorkspace(out_mon, EnableLogging=_LOGGING_)

        CropWorkspace(InputWorkspace=SUMMED_WS,
                      OutputWorkspace=SUMMED_WS,
                      XMax=self._tof_max,
                      EnableLogging=_LOGGING_)
        CropWorkspace(InputWorkspace=SUMMED_MON,
                      OutputWorkspace=SUMMED_MON,
                      XMax=self._mon_tof_max,
                      EnableLogging=_LOGGING_)
        return mtd[SUMMED_WS], mtd[SUMMED_MON]
Example #5
    def runTest(self):
        UseCompatibilityMode()
        LOQ()
        Detector("main-detector-bank")
        csv_file = FileFinder.getFullPath('batch_input.csv')

        Set1D()
        MaskFile('MASK.094AA')
        Gravity(True)

        BatchReduce(csv_file,
                    'raw',
                    plotresults=False,
                    saveAlgs={
                        'SaveCanSAS1D': 'xml',
                        'SaveNexus': 'nxs'
                    })

        LoadNexus(Filename='54433sans.nxs', OutputWorkspace='result')
        Plus(LHSWorkspace='result',
             RHSWorkspace='99630sanotrans',
             OutputWorkspace='result')

        os.remove(
            os.path.join(config['defaultsave.directory'], '54433sans.nxs'))
        os.remove(
            os.path.join(config['defaultsave.directory'],
                         '99630sanotrans.nxs'))
        os.remove(
            os.path.join(config['defaultsave.directory'], '54433sans.xml'))
        os.remove(
            os.path.join(config['defaultsave.directory'],
                         '99630sanotrans.xml'))
Example #6
 def monitorTransfit(self, files, foilType, divE):
     isFirstFile = True
     isSingleFile = len(files) == 1
     firstFileName = ""
     for file in files:
         discard, fileName = path.split(file)
         fnNoExt = path.splitext(fileName)[0]
         if isFirstFile:
             firstFileName = fnNoExt
         fileName_Raw = fnNoExt + '_raw'
         fileName_3 = fnNoExt + '_3'
         LoadRaw(Filename=file, OutputWorkspace=fileName_Raw)
         CropWorkspace(InputWorkspace=fileName_Raw, OutputWorkspace=fileName_Raw, XMin=100, XMax=19990)
         NormaliseByCurrent(InputWorkspace=fileName_Raw, OutputWorkspace=fileName_Raw)
         ExtractSingleSpectrum(InputWorkspace=fileName_Raw, OutputWorkspace=fileName_3, WorkspaceIndex=3)
         DeleteWorkspace(fileName_Raw)
         ConvertUnits(InputWorkspace=fileName_3, Target='Energy', OutputWorkspace=fileName_3)
         self.TransfitRebin(fileName_3, fileName_3, foilType, divE)
         if not isFirstFile:
             Plus(LHSWorkspace=firstFileName + '_3', RHSWorkspace=fileName_3, OutputWorkspace=firstFileName + '_3')
             DeleteWorkspace(fileName_3)
         else:
             isFirstFile = False
     if isSingleFile:
         RenameWorkspace(InputWorkspace=firstFileName + '_3', OutputWorkspace=firstFileName + '_monitor')
     else:
         scaleFactor = 1.0 / len(files)
         CreateSingleValuedWorkspace(OutputWorkspace='scale', DataValue=scaleFactor)
         Multiply(LHSWorkspace=firstFileName + '_3', RHSWorkspace='scale',
                  OutputWorkspace=firstFileName + '_monitor')
         DeleteWorkspace('scale')
         DeleteWorkspace(firstFileName + '_3')
Example #7
    def _load_runs(self, runs, w_name):
        """
        Load all run event Nexus files into a single `EventWorkspace`

        Parameters
        ----------
        runs: str
            Run numbers to be reduced. Symbol `;` separates the runs into
            substrings. Each substring represents a set of runs to be
            reduced together
        w_name: str
            Name of output workspace

        Returns
        -------
        Mantid.EventsWorkspace
        """
        rl = self._run_list(runs)
        #
        # Load files together
        #
        _t_all_w = None
        _t_all_w_name = tws('aggregate_load_run')
        _t_w_name = tws('load_run')
        for run in rl:
            _t_w = self._load_single_run(run, _t_w_name)
            if _t_all_w is None:
                _t_all_w = CloneWorkspace(_t_w, OutputWorkspace=_t_all_w_name)
            else:
                _t_all_w = Plus(_t_all_w, _t_w, OutputWorkspace=_t_all_w_name)
        RenameWorkspace(_t_all_w, OutputWorkspace=w_name)
        return _t_all_w
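The None-check aggregation in _load_runs is another form of the same idiom, cloning the first workspace and adding the rest. A hedged sketch, with CreateSampleWorkspace standing in for _load_single_run and made-up run numbers:

from mantid.simpleapi import CloneWorkspace, CreateSampleWorkspace, Plus

aggregate = None
for run in (100, 101, 102):  # hypothetical run numbers
    single = CreateSampleWorkspace(OutputWorkspace='single_run')  # stand-in for _load_single_run
    if aggregate is None:
        aggregate = CloneWorkspace(single, OutputWorkspace='aggregate')
    else:
        aggregate = Plus(aggregate, single, OutputWorkspace='aggregate')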
Example #8
 def performOperation(self):
     lhs_valid, rhs_valid, err_msg = self.validateInputs()
      if err_msg:
         return lhs_valid, rhs_valid, err_msg
     lhs_ws, rhs_ws = self._scale_input_workspaces()
     try:
         if self._operation == '+':
             if self._md_lhs or self._md_rhs:
                 PlusMD(LHSWorkspace=lhs_ws,
                        RHSWorkspace=rhs_ws,
                        OutputWorkspace=self._output_ws)
             else:
                 Plus(LHSWorkspace=lhs_ws,
                      RHSWorkspace=rhs_ws,
                      OutputWorkspace=self._output_ws)
         elif self._operation == '-':
             if self._md_lhs or self._md_rhs:
                 MinusMD(LHSWorkspace=lhs_ws,
                         RHSWorkspace=rhs_ws,
                         OutputWorkspace=self._output_ws)
             else:
                 Minus(LHSWorkspace=lhs_ws,
                       RHSWorkspace=rhs_ws,
                       OutputWorkspace=self._output_ws)
         elif self._operation == '*':
             if self._md_lhs or self._md_rhs:
                 MultiplyMD(LHSWorkspace=lhs_ws,
                            RHSWorkspace=rhs_ws,
                            OutputWorkspace=self._output_ws)
             else:
                 Multiply(LHSWorkspace=lhs_ws,
                          RHSWorkspace=rhs_ws,
                          OutputWorkspace=self._output_ws)
         elif self._operation == 'WM':
             if self._md_lhs or self._md_rhs:
                 WeightedMeanMD(LHSWorkspace=lhs_ws,
                                RHSWorkspace=rhs_ws,
                                OutputWorkspace=self._output_ws)
             else:
                 WeightedMean(InputWorkspace1=lhs_ws,
                              InputWorkspace2=rhs_ws,
                              OutputWorkspace=self._output_ws)
         else:
             if self._md_lhs or self._md_rhs:
                 DivideMD(LHSWorkspace=lhs_ws,
                          RHSWorkspace=rhs_ws,
                          OutputWorkspace=self._output_ws)
             else:
                 Divide(LHSWorkspace=lhs_ws,
                        RHSWorkspace=rhs_ws,
                        OutputWorkspace=self._output_ws)
     except (RuntimeError, ValueError) as err:
         return False, False, str(err)
     else:
         self._regularize_output_names(self._output_ws)
     finally:
         DeleteWorkspaces(WorkspaceList=[lhs_ws, rhs_ws])
     return True, True, ""
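The if/elif ladder in performOperation could equally be written as a dispatch table; a sketch of that design choice for the four arithmetic operations (WeightedMean is left out because its properties are InputWorkspace1/InputWorkspace2 rather than LHS/RHS):

from mantid.simpleapi import (Divide, DivideMD, Minus, MinusMD, Multiply,
                              MultiplyMD, Plus, PlusMD)

BINARY_OPS = {'+': (Plus, PlusMD), '-': (Minus, MinusMD),
              '*': (Multiply, MultiplyMD), '/': (Divide, DivideMD)}

def apply_op(op, lhs, rhs, out, is_md):
    # pick the MD or regular flavour of the requested binary operation
    regular, md = BINARY_OPS[op]
    alg = md if is_md else regular
    return alg(LHSWorkspace=lhs, RHSWorkspace=rhs, OutputWorkspace=out)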
Example #9
def load_and_rebin(runs: List[int],
                   output_workspace: str,
                   rebin_params: List[float],
                   banks: Optional[List[int]] = None) -> Workspace2D:
    r"""
    @brief Load a list of run numbers and rebin

    This function assumes the runs are large and events cannot be all loaded into memory. Hence, a run is loaded
    at a time, rebinned to TOF counts, events are dropped, and counts are added to the cumulative histogram
    resulting from loading the previous runs.

    @param runs : list of run numbers
    @param output_workspace : the name of the output `MatrixWorkspace`
    @param rebin_params : a triad of first, step, and last. A negative step indicates logarithmic binning
    @param banks : list of bank numbers, if one wants to load only certain banks.
    @return handle to the output workspace
    """
    instrument = 'CORELLI'
    kwargs = {} if banks is None else {
        'BankName': ','.join([f'bank{b}' for b in banks])
    }

    # Load the first run
    logger.information(
        f'Loading run {runs[0]}. {len(runs)} runs remaining to be loaded')
    LoadEventNexus(Filename=f'{instrument}_{runs[0]}',
                   OutputWorkspace=output_workspace,
                   LoadLogs=False,
                   **kwargs)
    if rebin_params is not None:
        Rebin(InputWorkspace=output_workspace,
              OutputWorkspace=output_workspace,
              Params=rebin_params,
              PreserveEvents=False)
    # Iteratively load the remaining runs, adding to the final workspace each time
    try:
        single_run = '__single_run_' + output_workspace
        for i, run in enumerate(runs[1:]):
            logger.information(
                f'Loading run {run}. {len(runs) - 1 - i} runs remaining to be loaded'
            )
            LoadEventNexus(Filename=f'{instrument}_{run}',
                           OutputWorkspace=single_run,
                           LoadLogs=False,
                           **kwargs)
            if rebin_params is not None:
                Rebin(InputWorkspace=single_run,
                      OutputWorkspace=single_run,
                      Params=rebin_params,
                      PreserveEvents=False)
            Plus(LHSWorkspace=output_workspace,
                 RHSWorkspace=single_run,
                 OutputWorkspace=output_workspace)
            DeleteWorkspace(single_run)  # free memory as quickly as possible
    except RuntimeError:
        if mtd.doesExist(single_run):  # clean up any partial workspace left by the failure
            DeleteWorkspace(single_run)
    return mtd[output_workspace]
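A possible invocation of load_and_rebin, with hypothetical CORELLI run numbers; the negative middle rebin parameter requests logarithmic binning, as the docstring notes:

ws = load_and_rebin(runs=[12345, 12346, 12347],  # hypothetical runs
                    output_workspace='corelli_sum',
                    rebin_params=[300.0, -0.001, 16660.0],  # log binning via negative step
                    banks=[10, 11, 12])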
Example #10
 def plot_background_figure(self, ws_name):
     ws = self._loaded_workspaces[ws_name]
     ws_bg = self._background_workspaces[ws_name]
     if ws_bg:
         fig, ax = subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]},
                            subplot_kw={'projection': 'mantid'})
         tmp = Plus(LHSWorkspace=ws_name, RHSWorkspace=ws_bg, StoreInADS=False)
         ax[0].plot(tmp, 'x')
         ax[0].plot(ws_bg, '-r')
         ax[1].plot(ws, 'x')
         fig.show()
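Note the StoreInADS=False keyword on the Plus call above: the sum exists only as a Python handle and never enters the analysis data service, so no DeleteWorkspace is needed afterwards. A minimal sketch of the same trick on two synthetic workspaces:

from mantid.simpleapi import CreateSampleWorkspace, Plus

a = CreateSampleWorkspace(OutputWorkspace='a')
b = CreateSampleWorkspace(OutputWorkspace='b')
tmp = Plus(LHSWorkspace=a, RHSWorkspace=b, StoreInADS=False)
print(tmp.readY(0)[:5])  # the handle is usable even though 'tmp' is not in the ADS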
Example #11
def combine_loaded_runs(model, run_list, delete_added=False):
    period_list = [model._data_context.num_periods([run]) for run in run_list]
    if max(period_list) != min(period_list):
        raise RuntimeError(
            'Inconsistent periods across co-added runs. This is not supported.'
        )
    return_ws = model._loaded_data_store.get_data(
        run=[run_list[0]])["workspace"]
    running_total = []

    for index in range(min(period_list)):
        workspace = return_ws["OutputWorkspace"][index]
        running_total_item = workspace.workspace.name() + 'CoAdd'
        CloneWorkspace(InputWorkspace=workspace.workspace.name(),
                       OutputWorkspace=running_total_item)
        for run in run_list[1:]:
            ws = model._loaded_data_store.get_data(
                run=[run])["workspace"]["OutputWorkspace"][index].workspace
            Plus(LHSWorkspace=running_total_item,
                 RHSWorkspace=ws,
                 AllowDifferentNumberSpectra=False,
                 OutputWorkspace=running_total_item)

        running_total.append(running_total_item)

    return_ws_actual = {
        key: return_ws[key]
        for key in ['MainFieldDirection', 'TimeZero', 'FirstGoodData']
    }
    try:
        return_ws_actual['DetectorGroupingTable'] = return_ws[
            'DetectorGroupingTable']
    except KeyError:
        pass  # PSI Data does not include Detector Grouping table as it's read from sample logs instead
    try:
        return_ws_actual['DeadTimeTable'] = return_ws['DeadTimeTable']
    except KeyError:
        pass  # Again, PSI data does not always include DeadTimeTable either
    return_ws_actual["OutputWorkspace"] = [
        MuonWorkspaceWrapper(running_total_period)
        for running_total_period in running_total
    ]
    return_ws_actual['DataDeadTimeTable'] = CloneWorkspace(
        InputWorkspace=return_ws['DataDeadTimeTable'],
        OutputWorkspace=return_ws['DataDeadTimeTable'] + 'CoAdd').name()
    model._loaded_data_store.remove_data(
        run=flatten_run_list(run_list),
        instrument=model._data_context.instrument)
    model._loaded_data_store.add_data(
        run=flatten_run_list(run_list),
        workspace=return_ws_actual,
        filename="Co-added",
        instrument=model._data_context.instrument)
Example #12
    def __processFile(self, filename, wkspname, file_prog_start, determineCharacterizations):
        chunks = determineChunking(filename, self.chunkSize)
        self.log().information('Processing \'%s\' in %d chunks' % (filename, len(chunks)))
        prog_per_chunk_step = self.prog_per_file * 1./(6.*float(len(chunks))) # for better progress reporting - 6 steps per chunk

        # inner loop is over chunks
        for (j, chunk) in enumerate(chunks):
            prog_start = file_prog_start + float(j) * 5. * prog_per_chunk_step
            chunkname = "%s_c%d" % (wkspname, j)
            Load(Filename=filename, OutputWorkspace=chunkname,
                 startProgress=prog_start, endProgress=prog_start+prog_per_chunk_step,
                 **chunk)
            if determineCharacterizations:
                self.__determineCharacterizations(filename, chunkname, False) # updates instance variable
                determineCharacterizations = False

            prog_start += prog_per_chunk_step
            if self.filterBadPulses > 0.:
                FilterBadPulses(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                                LowerCutoff=self.filterBadPulses,
                                startProgress=prog_start, endProgress=prog_start+prog_per_chunk_step)
            prog_start += prog_per_chunk_step

            # absorption correction workspace
            if self.absorption is not None and len(str(self.absorption)) > 0:
                ConvertUnits(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                             Target='Wavelength', EMode='Elastic')
                Divide(LHSWorkspace=chunkname, RHSWorkspace=self.absorption, OutputWorkspace=chunkname,
                       startProgress=prog_start, endProgress=prog_start+prog_per_chunk_step)
                ConvertUnits(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                             Target='TOF', EMode='Elastic')
            prog_start += prog_per_chunk_step

            AlignAndFocusPowder(InputWorkspace=chunkname, OutputWorkspace=chunkname,
                                startProgress=prog_start, endProgress=prog_start+2.*prog_per_chunk_step,
                                **self.kwargs)
            prog_start += 2.*prog_per_chunk_step # AlignAndFocusPowder counts for two steps

            if j == 0:
                self.__updateAlignAndFocusArgs(chunkname)
                RenameWorkspace(InputWorkspace=chunkname, OutputWorkspace=wkspname)
            else:
                Plus(LHSWorkspace=wkspname, RHSWorkspace=chunkname, OutputWorkspace=wkspname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                     startProgress=prog_start, endProgress=prog_start+prog_per_chunk_step)
                DeleteWorkspace(Workspace=chunkname)
                if self.kwargs['PreserveEvents']:
                    CompressEvents(InputWorkspace=wkspname, OutputWorkspace=wkspname)
Example #13
    def add_previous_pulse(w):
        """
        Duplicate the events but shift them by one pulse, then add to
        input workspace

        Parameters
        ----------
        w: Mantid.EventsWorkspace

        Returns
        -------
        Mantid.EventsWorkspace
        """
        pulse_width = 1.e6 / 60  # one 60 Hz pulse period, in microseconds
        _t_w = ScaleX(w, Factor=-pulse_width, Operation='Add')
        _t_w = Plus(w, _t_w, OutputWorkspace=w.name())
        return _t_w
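A hedged usage sketch for add_previous_pulse; the event workspace is synthetic, and the 60 Hz pulse period matches the constant inside the function:

from mantid.simpleapi import CreateSampleWorkspace

w = CreateSampleWorkspace(WorkspaceType='Event', OutputWorkspace='events')
w = add_previous_pulse(w)  # every event now also appears shifted one pulse earlier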
Example #14
def _integrateBkgs(ws, eppWS, sigmaMultiplier, wsNames, wsCleanup,
                   algorithmLogging):
    """Return a workspace integrated around flat background areas."""
    histogramCount = ws.getNumberHistograms()
    binMatrix = ws.extractX()
    leftBegins = binMatrix[:, 0]
    leftEnds = numpy.empty(histogramCount)
    rightBegins = numpy.empty(histogramCount)
    rightEnds = binMatrix[:, -1]
    for i in range(histogramCount):
        eppRow = eppWS.row(i)
        if eppRow['FitStatus'] != 'success':
            leftBegins[i] = 0
            leftEnds[i] = 0
            rightBegins[i] = 0
            rightEnds[i] = 0
            continue
        peakCentre = eppRow['PeakCentre']
        sigma = eppRow['Sigma']
        leftEnds[i] = peakCentre - sigmaMultiplier * sigma
        if leftBegins[i] > leftEnds[i]:
            leftBegins[i] = leftEnds[i]
        rightBegins[i] = peakCentre + sigmaMultiplier * sigma
        if rightBegins[i] > rightEnds[i]:
            rightBegins[i] = rightEnds[i]
    leftWSName = wsNames.withSuffix('integrated_left_bkgs')
    leftWS = Integration(InputWorkspace=ws,
                         OutputWorkspace=leftWSName,
                         RangeLowerList=leftBegins,
                         RangeUpperList=leftEnds,
                         EnableLogging=algorithmLogging)
    rightWSName = wsNames.withSuffix('integrated_right_bkgs')
    rightWS = Integration(InputWorkspace=ws,
                          OutputWorkspace=rightWSName,
                          RangeLowerList=rightBegins,
                          RangeUpperList=rightEnds,
                          EnableLogging=algorithmLogging)
    sumWSName = wsNames.withSuffix('integrated_bkgs_sum')
    sumWS = Plus(LHSWorkspace=leftWS,
                 RHSWorkspace=rightWS,
                 OutputWorkspace=sumWSName,
                 EnableLogging=algorithmLogging)
    wsCleanup.cleanup(leftWS)
    wsCleanup.cleanup(rightWS)
    return sumWS
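The window arithmetic in the loop above is straightforward; a worked sketch with made-up EPP fit values and sigmaMultiplier = 3:

peakCentre, sigma, sigmaMultiplier = 500.0, 20.0, 3.0  # made-up fit results
xFirst, xLast = 0.0, 1000.0                            # first and last bin boundaries
leftBegin, leftEnd = xFirst, peakCentre - sigmaMultiplier * sigma
rightBegin, rightEnd = peakCentre + sigmaMultiplier * sigma, xLast
# left window [0, 440] and right window [560, 1000]; the elastic peak is excluded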
Example #15
    def setUp(self):
        config['default.facility'] = 'ILL'
        config['default.instrument'] = 'D11'
        config['logging.loggers.root.level'] = 'Warning'
        config.appendDataSearchSubDir('ILL/D11/')

        # prepare mask for instrument edges first:
        MaskBTP(Instrument='D11', Tube='1-3,253-256')
        RenameWorkspace(InputWorkspace='D11MaskBTP', OutputWorkspace='mask_vertical')
        MaskBTP(Instrument='D11', Pixel='1-3,253-256')
        Plus(LHSWorkspace='mask_vertical', RHSWorkspace='D11MaskBTP', OutputWorkspace='edge_masks')
        # the edges mask can be used as a default mask for all distances and wavelengths
        MaskBTP(Instrument='D11', Tube='116-139', Pixel='90-116')
        RenameWorkspace(InputWorkspace='D11MaskBTP', OutputWorkspace='mask_39m_10A')
        MaskBTP(Instrument='D11', Tube='115-140', Pixel='115-140')
        RenameWorkspace(InputWorkspace='D11MaskBTP', OutputWorkspace='mask_8m_4_6A')
        MaskBTP(Instrument='D11', Tube='105-145', Pixel='105-145')
        RenameWorkspace(InputWorkspace='D11MaskBTP', OutputWorkspace='mask_1m_4_6A')
Example #16
    def test_unmirror_0_1_2_3(self):

        args = {
            'Run': '136553.nxs',
            'UnmirrorOption': 0,
            'OutputWorkspace': 'zero'
        }

        IndirectILLReductionQENS(**args)

        args['UnmirrorOption'] = 1

        args['OutputWorkspace'] = 'both'

        IndirectILLReductionQENS(**args)

        args['UnmirrorOption'] = 2

        args['OutputWorkspace'] = 'left'

        IndirectILLReductionQENS(**args)

        args['UnmirrorOption'] = 3

        args['OutputWorkspace'] = 'right'

        IndirectILLReductionQENS(**args)

        summed = Plus(mtd['left_red'].getItem(0), mtd['right_red'].getItem(0))

        Scale(InputWorkspace=summed, Factor=0.5, OutputWorkspace=summed)

        result = CompareWorkspaces(summed, mtd['both_red'].getItem(0))

        self.assertTrue(result[0], "Unmirror 1 should be the sum of 2 and 3")

        left_right = GroupWorkspaces([
            mtd['left_red'].getItem(0).getName(),
            mtd['right_red'].getItem(0).getName()
        ])

        result = CompareWorkspaces(left_right, 'zero_red')

        self.assertTrue(result[0], "Unmirror 0 should be the group of 2 and 3")
Example #17
def combine_loaded_runs(model, run_list, delete_added=False):
    return_ws = model._loaded_data_store.get_data(
        run=[run_list[0]])["workspace"]
    running_total = []

    for index, workspace in enumerate(return_ws["OutputWorkspace"]):
        running_total_item = workspace.workspace.name() + 'CoAdd'
        CloneWorkspace(InputWorkspace=workspace.workspace.name(),
                       OutputWorkspace=running_total_item)
        for run in run_list[1:]:
            ws = model._loaded_data_store.get_data(
                run=[run])["workspace"]["OutputWorkspace"][index].workspace
            Plus(LHSWorkspace=running_total_item,
                 RHSWorkspace=ws,
                 AllowDifferentNumberSpectra=False,
                 OutputWorkspace=running_total_item)

        running_total.append(running_total_item)

    return_ws_actual = {
        key: return_ws[key]
        for key in [
            'MainFieldDirection', 'TimeZero', 'FirstGoodData', 'DeadTimeTable',
            'DetectorGroupingTable'
        ]
    }
    return_ws_actual["OutputWorkspace"] = [
        MuonWorkspaceWrapper(running_total_period)
        for running_total_period in running_total
    ]
    return_ws_actual['DataDeadTimeTable'] = CloneWorkspace(
        InputWorkspace=return_ws['DataDeadTimeTable'],
        OutputWorkspace=return_ws['DataDeadTimeTable'] + 'CoAdd').name()
    model._loaded_data_store.remove_data(
        run=flatten_run_list(run_list),
        instrument=model._data_context.instrument)
    model._loaded_data_store.add_data(
        run=flatten_run_list(run_list),
        workspace=return_ws_actual,
        filename="Co-added",
        instrument=model._data_context.instrument)
Example #18
    def PyExec(self):
        filenames = self._getLinearizedFilenames('Filename')
        self.filterBadPulses = self.getProperty('FilterBadPulses').value
        self.chunkSize = self.getProperty('MaxChunkSize').value
        self.absorption = self.getProperty('AbsorptionWorkspace').value
        self.charac = self.getProperty('Characterizations').value
        finalname = self.getProperty('OutputWorkspace').valueAsStr

        self.prog_per_file = 1. / float(
            len(filenames))  # for better progress reporting

        # these are also passed into the child-algorithms
        self.kwargs = self.__getAlignAndFocusArgs()

        # outer loop creates chunks to load
        for (i, filename) in enumerate(filenames):
            # default name is based off of filename
            wkspname = os.path.split(filename)[-1].split('.')[0]
            self.__determineCharacterizations(
                filename, wkspname)  # updates instance variable
            cachefile = self.__getCacheName(wkspname)
            wkspname += '_f%d' % i  # add file number to be unique

            if cachefile is not None and os.path.exists(cachefile):
                LoadNexusProcessed(Filename=cachefile,
                                   OutputWorkspace=wkspname)
                # TODO LoadNexusProcessed has a bug. When it finds the
                # instrument name without xml it reads in from an IDF
                # in the instrument directory.
                editinstrargs = {}
                for name in PROPS_FOR_INSTR:
                    prop = self.getProperty(name)
                    if not prop.isDefault:
                        editinstrargs[name] = prop.value
                if editinstrargs:
                    EditInstrumentGeometry(Workspace=wkspname, **editinstrargs)
            else:
                self.__processFile(filename, wkspname,
                                   self.prog_per_file * float(i))
                if cachefile is not None:
                    SaveNexusProcessed(InputWorkspace=wkspname,
                                       Filename=cachefile)

            # accumulate runs
            if i == 0:
                if wkspname != finalname:
                    RenameWorkspace(InputWorkspace=wkspname,
                                    OutputWorkspace=finalname)
            else:
                Plus(LHSWorkspace=finalname,
                     RHSWorkspace=wkspname,
                     OutputWorkspace=finalname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'])
                DeleteWorkspace(Workspace=wkspname)
                if self.kwargs['PreserveEvents']:
                    CompressEvents(InputWorkspace=finalname,
                                   OutputWorkspace=finalname)

        # with more than one chunk or file the integrated proton charge is
        # generically wrong
        mtd[finalname].run().integrateProtonCharge()

        # set the output workspace
        self.setProperty('OutputWorkspace', mtd[finalname])
Example #19
 def undo_background_subtraction(self, ws_name, isBGsub=False):
     self._bg_params[ws_name][0] = isBGsub  # must do this before plotting which refreshes table
     Plus(LHSWorkspace=ws_name, RHSWorkspace=self.get_background_workspaces()[ws_name],
          OutputWorkspace=ws_name)
Example #20
    def PyExec(self):
        """Executes the data reduction workflow."""
        progress = Progress(self, 0.0, 1.0, 7)
        report = common.Report()
        subalgLogging = self.getProperty(
            common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
        wsNamePrefix = self.getProperty(common.PROP_OUTPUT_WS).valueAsStr
        cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
        wsNames = common.NameSource(wsNamePrefix, cleanupMode)
        wsCleanup = common.IntermediateWSCleanup(cleanupMode, subalgLogging)

        progress.report('Loading inputs')
        mainWS = self._inputWS(wsNames, wsCleanup, subalgLogging)

        maskWSName = wsNames.withSuffix('combined_mask')
        maskWS = _createMaskWS(mainWS, maskWSName, subalgLogging)
        wsCleanup.cleanupLater(maskWS)

        reportWS = None
        if not self.getProperty(
                common.PROP_OUTPUT_DIAGNOSTICS_REPORT_WS).isDefault:
            reportWSName = self.getProperty(
                common.PROP_OUTPUT_DIAGNOSTICS_REPORT_WS).valueAsStr
            reportWS = _createDiagnosticsReportTable(
                reportWSName, mainWS.getNumberHistograms(), subalgLogging)

        progress.report('Loading default mask')
        defaultMaskWS = self._defaultMask(mainWS, wsNames, wsCleanup, report,
                                          subalgLogging)
        defaultMaskedSpectra = set()
        if defaultMaskWS is not None:
            defaultMaskedSpectra = _reportDefaultMask(reportWS, defaultMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=defaultMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(defaultMaskWS)

        progress.report('User-defined mask')
        userMaskWS = self._userMask(mainWS, wsNames, wsCleanup, subalgLogging)
        maskWS = Plus(LHSWorkspace=maskWS,
                      RHSWorkspace=userMaskWS,
                      EnableLogging=subalgLogging)
        wsCleanup.cleanup(userMaskWS)

        beamStopMaskedSpectra = set()
        if self._beamStopDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing beam stop')
            beamStopMaskWS = self._beamStopDiagnostics(mainWS, maskWS, wsNames,
                                                       wsCleanup, report,
                                                       subalgLogging)
            beamStopMaskedSpectra = _reportBeamStopMask(
                reportWS, beamStopMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=beamStopMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(beamStopMaskWS)

        bkgMaskedSpectra = set()
        if self._bkgDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing backgrounds')
            bkgMaskWS, bkgWS = self._bkgDiagnostics(mainWS, wsNames, wsCleanup,
                                                    report, subalgLogging)
            bkgMaskedSpectra = _reportBkgDiagnostics(reportWS, bkgWS,
                                                     bkgMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=bkgMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(bkgMaskWS)
            wsCleanup.cleanup(bkgWS)

        peakMaskedSpectra = set()
        if self._peakDiagnosticsEnabled(mainWS, report):
            progress.report('Diagnosing peaks')
            peakMaskWS, peakIntensityWS = self._peakDiagnostics(
                mainWS, wsNames, wsCleanup, report, subalgLogging)
            peakMaskedSpectra = _reportPeakDiagnostics(reportWS,
                                                       peakIntensityWS,
                                                       peakMaskWS)
            maskWS = Plus(LHSWorkspace=maskWS,
                          RHSWorkspace=peakMaskWS,
                          EnableLogging=subalgLogging)
            wsCleanup.cleanup(peakMaskWS)
            wsCleanup.cleanup(peakIntensityWS)

        self._outputReports(reportWS, defaultMaskedSpectra,
                            beamStopMaskedSpectra, peakMaskedSpectra,
                            bkgMaskedSpectra)

        self._finalize(maskWS, wsCleanup, report)
        progress.report('Done')
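Each Plus call above merges another diagnostic mask into the running combined mask: adding 0/1-valued workspaces leaves a spectrum nonzero if any input flagged it. A toy sketch of that behaviour:

from mantid.simpleapi import CreateWorkspace, Plus

maskA = CreateWorkspace(DataX=[0, 1] * 3, DataY=[1, 0, 0], NSpec=3, OutputWorkspace='maskA')
maskB = CreateWorkspace(DataX=[0, 1] * 3, DataY=[0, 1, 0], NSpec=3, OutputWorkspace='maskB')
combined = Plus(LHSWorkspace=maskA, RHSWorkspace=maskB, OutputWorkspace='combined')
print([combined.readY(i)[0] for i in range(3)])  # [1.0, 1.0, 0.0]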
Example #21
    def PyExec(self):
        filenames = self._getLinearizedFilenames('Filename')
        self.filterBadPulses = self.getProperty('FilterBadPulses').value
        self.chunkSize = self.getProperty('MaxChunkSize').value
        self.absorption = self.getProperty('AbsorptionWorkspace').value
        self.charac = self.getProperty('Characterizations').value
        finalname = self.getPropertyValue('OutputWorkspace')
        useCaching = len(self.getProperty('CacheDir').value) > 0

        # accumulate the unfocused workspace if it was requested
        # empty string means it is not used
        unfocusname = self.getPropertyValue('UnfocussedWorkspace')
        unfocusname_file = ''
        if len(unfocusname) > 0:
            unfocusname_file = '__{}_partial'.format(unfocusname)

        if useCaching:
            # unfocus check only matters if caching is requested
            if unfocusname != '':
                self.log().warning(
                    'CacheDir is specified with "UnfocussedWorkspace" - reading cache files disabled'
                )
        else:
            self.log().warning(
                'CacheDir is not specified - caching is disabled')

        self.prog_per_file = 1. / float(
            len(filenames))  # for better progress reporting

        # these are also passed into the child-algorithms
        self.kwargs = self.__getAlignAndFocusArgs()

        # outer loop creates chunks to load
        for (i, filename) in enumerate(filenames):
            # default name is based off of filename
            wkspname = os.path.split(filename)[-1].split('.')[0]

            if useCaching:
                self.__determineCharacterizations(
                    filename, wkspname, True)  # updates instance variable
                cachefile = self.__getCacheName(wkspname)
            else:
                cachefile = None

            wkspname += '_f%d' % i  # add file number to be unique

            # if the unfocussed data is requested, don't read it from disk
            # because of the extra complication of the unfocussed workspace
            if useCaching and os.path.exists(cachefile) and unfocusname == '':
                LoadNexusProcessed(Filename=cachefile,
                                   OutputWorkspace=wkspname)
                # TODO LoadNexusProcessed has a bug. When it finds the
                # instrument name without xml it reads in from an IDF
                # in the instrument directory.
                editinstrargs = {}
                for name in PROPS_FOR_INSTR:
                    prop = self.getProperty(name)
                    if not prop.isDefault:
                        editinstrargs[name] = prop.value
                if editinstrargs:
                    EditInstrumentGeometry(Workspace=wkspname, **editinstrargs)
            else:
                self.__processFile(filename, wkspname, unfocusname_file,
                                   self.prog_per_file * float(i),
                                   not useCaching)

                # write out the cachefile for the main reduced data independent of whether
                # the unfocussed workspace was requested
                if useCaching:
                    SaveNexusProcessed(InputWorkspace=wkspname,
                                       Filename=cachefile)

            # accumulate runs
            if i == 0:
                if wkspname != finalname:
                    RenameWorkspace(InputWorkspace=wkspname,
                                    OutputWorkspace=finalname)
                if unfocusname != '':
                    RenameWorkspace(InputWorkspace=unfocusname_file,
                                    OutputWorkspace=unfocusname)
            else:
                Plus(LHSWorkspace=finalname,
                     RHSWorkspace=wkspname,
                     OutputWorkspace=finalname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'])
                DeleteWorkspace(Workspace=wkspname)

                if unfocusname != '':
                    Plus(LHSWorkspace=unfocusname,
                         RHSWorkspace=unfocusname_file,
                         OutputWorkspace=unfocusname,
                         ClearRHSWorkspace=self.kwargs['PreserveEvents'])
                    DeleteWorkspace(Workspace=unfocusname_file)

                if self.kwargs['PreserveEvents']:
                    CompressEvents(InputWorkspace=finalname,
                                   OutputWorkspace=finalname)
                    # not compressing unfocussed workspace because it is in d-spacing
                    # and is likely to be from a different part of the instrument

        # with more than one chunk or file the integrated proton charge is
        # generically wrong
        mtd[finalname].run().integrateProtonCharge()

        # set the output workspace
        self.setProperty('OutputWorkspace', mtd[finalname])
        if unfocusname != '':
            self.setProperty('UnfocussedWorkspace', mtd[unfocusname])
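The caching logic in this PyExec reduces to: derive a cache filename, load it if present, otherwise process and save. A schematic sketch of that control flow; getCacheName and process are hypothetical stand-ins for the private helpers:

import os
from mantid.simpleapi import LoadNexusProcessed, SaveNexusProcessed

def reduce_with_cache(filename, wkspname, getCacheName, process, useCaching=True):
    # getCacheName and process are placeholders for __getCacheName and __processFile
    cachefile = getCacheName(wkspname) if useCaching else None
    if cachefile is not None and os.path.exists(cachefile):
        LoadNexusProcessed(Filename=cachefile, OutputWorkspace=wkspname)
    else:
        process(filename, wkspname)
        if cachefile is not None:
            SaveNexusProcessed(InputWorkspace=wkspname, Filename=cachefile)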
Example #22
    def __processFile(self, filename, wkspname, unfocusname, file_prog_start,
                      determineCharacterizations):
        chunks = determineChunking(filename, self.chunkSize)
        numSteps = 6  # for better progress reporting - 6 steps per chunk
        if unfocusname != '':
            numSteps = 7  # one more for accumulating the unfocused workspace
        self.log().information('Processing \'{}\' in {:d} chunks'.format(
            filename, len(chunks)))
        prog_per_chunk_step = self.prog_per_file * 1. / (numSteps *
                                                         float(len(chunks)))
        unfocusname_chunk = ''

        # inner loop is over chunks
        for (j, chunk) in enumerate(chunks):
            prog_start = file_prog_start + float(j) * float(
                numSteps - 1) * prog_per_chunk_step
            chunkname = '{}_c{:d}'.format(wkspname, j)
            if unfocusname != '':  # only create unfocus chunk if needed
                unfocusname_chunk = '{}_c{:d}'.format(unfocusname, j)

            Load(Filename=filename,
                 OutputWorkspace=chunkname,
                 startProgress=prog_start,
                 endProgress=prog_start + prog_per_chunk_step,
                 **chunk)
            if determineCharacterizations:
                self.__determineCharacterizations(
                    filename, chunkname, False)  # updates instance variable
                determineCharacterizations = False

            prog_start += prog_per_chunk_step
            if self.filterBadPulses > 0.:
                FilterBadPulses(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                LowerCutoff=self.filterBadPulses,
                                startProgress=prog_start,
                                endProgress=prog_start + prog_per_chunk_step)
            prog_start += prog_per_chunk_step

            # absorption correction workspace
            if self.absorption is not None and len(str(self.absorption)) > 0:
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='Wavelength',
                             EMode='Elastic')
                Divide(LHSWorkspace=chunkname,
                       RHSWorkspace=self.absorption,
                       OutputWorkspace=chunkname,
                       startProgress=prog_start,
                       endProgress=prog_start + prog_per_chunk_step)
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='TOF',
                             EMode='Elastic')
            prog_start += prog_per_chunk_step

            AlignAndFocusPowder(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                UnfocussedWorkspace=unfocusname_chunk,
                                startProgress=prog_start,
                                endProgress=prog_start +
                                2. * prog_per_chunk_step,
                                **self.kwargs)
            prog_start += 2. * prog_per_chunk_step  # AlignAndFocusPowder counts for two steps

            if j == 0:
                self.__updateAlignAndFocusArgs(chunkname)
                RenameWorkspace(InputWorkspace=chunkname,
                                OutputWorkspace=wkspname)
                if unfocusname != '':
                    RenameWorkspace(InputWorkspace=unfocusname_chunk,
                                    OutputWorkspace=unfocusname)
            else:
                Plus(LHSWorkspace=wkspname,
                     RHSWorkspace=chunkname,
                     OutputWorkspace=wkspname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                     startProgress=prog_start,
                     endProgress=prog_start + prog_per_chunk_step)
                DeleteWorkspace(Workspace=chunkname)

                if unfocusname != '':
                    Plus(LHSWorkspace=unfocusname,
                         RHSWorkspace=unfocusname_chunk,
                         OutputWorkspace=unfocusname,
                         ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                         startProgress=prog_start,
                         endProgress=prog_start + prog_per_chunk_step)
                    DeleteWorkspace(Workspace=unfocusname_chunk)

                if self.kwargs['PreserveEvents']:
                    CompressEvents(InputWorkspace=wkspname,
                                   OutputWorkspace=wkspname)
Example #23
    def __processFile(self, filename, wkspname, unfocusname, file_prog_start,
                      determineCharacterizations):
        chunks = determineChunking(filename, self.chunkSize)
        numSteps = 6  # for better progress reporting - 6 steps per chunk
        if unfocusname != '':
            numSteps = 7  # one more for accumulating the unfocused workspace
        self.log().information('Processing \'{}\' in {:d} chunks'.format(
            filename, len(chunks)))
        prog_per_chunk_step = self.prog_per_file * 1. / (numSteps *
                                                         float(len(chunks)))
        unfocusname_chunk = ''
        canSkipLoadingLogs = False

        # inner loop is over chunks
        for (j, chunk) in enumerate(chunks):
            prog_start = file_prog_start + float(j) * float(
                numSteps - 1) * prog_per_chunk_step
            chunkname = '{}_c{:d}'.format(wkspname, j)
            if unfocusname != '':  # only create unfocus chunk if needed
                unfocusname_chunk = '{}_c{:d}'.format(unfocusname, j)

            # load a chunk - this is long-winded because we need an output property from `Load` while it
            # runs, and the algorithm history doesn't exist until the parent algorithm (this) has finished
            loader = self.__createLoader(filename,
                                         chunkname,
                                         progstart=prog_start,
                                         progstop=prog_start +
                                         prog_per_chunk_step)
            if canSkipLoadingLogs:
                loader.setProperty('LoadLogs', False)
            for key, value in chunk.items():
                if isinstance(value, str):
                    loader.setPropertyValue(key, value)
                else:
                    loader.setProperty(key, value)
            loader.execute()

            # copy the necessary logs onto the workspace
            if canSkipLoadingLogs:
                CopyLogs(InputWorkspace=wkspname,
                         OutputWorkspace=chunkname,
                         MergeStrategy='WipeExisting')

            # get the underlying loader name if we used the generic one
            if self.__loaderName == 'Load':
                self.__loaderName = loader.getPropertyValue('LoaderName')
            canSkipLoadingLogs = self.__loaderName == 'LoadEventNexus'

            if determineCharacterizations and j == 0:
                self.__determineCharacterizations(
                    filename, chunkname)  # updates instance variable
                determineCharacterizations = False

            prog_start += prog_per_chunk_step
            if self.filterBadPulses > 0.:
                FilterBadPulses(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                LowerCutoff=self.filterBadPulses,
                                startProgress=prog_start,
                                endProgress=prog_start + prog_per_chunk_step)
            prog_start += prog_per_chunk_step

            # absorption correction workspace
            if self.absorption is not None and len(str(self.absorption)) > 0:
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='Wavelength',
                             EMode='Elastic')
                Divide(LHSWorkspace=chunkname,
                       RHSWorkspace=self.absorption,
                       OutputWorkspace=chunkname,
                       startProgress=prog_start,
                       endProgress=prog_start + prog_per_chunk_step)
                ConvertUnits(InputWorkspace=chunkname,
                             OutputWorkspace=chunkname,
                             Target='TOF',
                             EMode='Elastic')
            prog_start += prog_per_chunk_step

            AlignAndFocusPowder(InputWorkspace=chunkname,
                                OutputWorkspace=chunkname,
                                UnfocussedWorkspace=unfocusname_chunk,
                                startProgress=prog_start,
                                endProgress=prog_start +
                                2. * prog_per_chunk_step,
                                **self.kwargs)
            prog_start += 2. * prog_per_chunk_step  # AlignAndFocusPowder counts for two steps

            if j == 0:
                self.__updateAlignAndFocusArgs(chunkname)
                RenameWorkspace(InputWorkspace=chunkname,
                                OutputWorkspace=wkspname)
                if unfocusname != '':
                    RenameWorkspace(InputWorkspace=unfocusname_chunk,
                                    OutputWorkspace=unfocusname)
            else:
                RemoveLogs(
                    Workspace=chunkname)  # accumulation has them already
                Plus(LHSWorkspace=wkspname,
                     RHSWorkspace=chunkname,
                     OutputWorkspace=wkspname,
                     ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                     startProgress=prog_start,
                     endProgress=prog_start + prog_per_chunk_step)
                DeleteWorkspace(Workspace=chunkname)

                if unfocusname != '':
                    RemoveLogs(Workspace=unfocusname_chunk
                               )  # accumulation has them already
                    Plus(LHSWorkspace=unfocusname,
                         RHSWorkspace=unfocusname_chunk,
                         OutputWorkspace=unfocusname,
                         ClearRHSWorkspace=self.kwargs['PreserveEvents'],
                         startProgress=prog_start,
                         endProgress=prog_start + prog_per_chunk_step)
                    DeleteWorkspace(Workspace=unfocusname_chunk)

                if self.kwargs['PreserveEvents'] and self.kwargs[
                        'CompressTolerance'] > 0.:
                    CompressEvents(InputWorkspace=wkspname,
                                   OutputWorkspace=wkspname,
                                   WallClockTolerance=self.
                                   kwargs['CompressWallClockTolerance'],
                                   Tolerance=self.kwargs['CompressTolerance'],
                                   StartTime=self.kwargs['CompressStartTime'])
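CompressEvents above is gated on a positive CompressTolerance and forwards the wall-clock tolerance and start time. A minimal hedged call on a synthetic event workspace, using only the TOF tolerance:

from mantid.simpleapi import CompressEvents, CreateSampleWorkspace

CreateSampleWorkspace(WorkspaceType='Event', OutputWorkspace='events')
CompressEvents(InputWorkspace='events', OutputWorkspace='events', Tolerance=0.01)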
Example #24
    def _flux_normalization(self, w, target):
        """
        Divide data by integrated flux intensity

        Parameters
        ----------
        w: Mantid.EventsWorkspace
            Input workspace
        target: str
            Specify the entity the workspace refers to. Valid options are
            'sample', 'background', and 'vanadium'

        Returns
        -------
        Mantid.EventWorkspace
        """
        valid_targets = ('sample', 'background', 'vanadium')
        if target not in valid_targets:
            raise ValueError('Target must be one of ' + ', '.join(valid_targets))
        w_nor = None
        if self._flux_normalization_type == 'Monitor':
            _t_flux = None
            _t_flux_name = tws('monitor_aggregate')
            target_to_runs = dict(sample='RunNumbers',
                                  background='BackgroundRuns',
                                  vanadium='VanadiumRuns')
            rl = self._run_list(self.getProperty(target_to_runs[target]).value)

            _t_w_name = tws('monitor')
            for run in rl:
                run_name = '{0}_{1}'.format(self._short_inst, str(run))
                _t_w = LoadNexusMonitors(run_name, OutputWorkspace=_t_w_name)
                if _t_flux is None:
                    _t_flux = CloneWorkspace(_t_w,
                                             OutputWorkspace=_t_flux_name)
                else:
                    _t_flux = Plus(_t_flux, _t_w, OutputWorkspace=_t_flux_name)

            _t_flux = ConvertUnits(_t_flux,
                                   Target='Wavelength',
                                   EMode='Elastic',
                                   OutputWorkspace=_t_flux_name)
            _t_flux = CropWorkspace(_t_flux,
                                    XMin=self._wavelength_band[0],
                                    XMax=self._wavelength_band[1],
                                    OutputWorkspace=_t_flux_name)
            _t_flux = OneMinusExponentialCor(_t_flux,
                                             C='0.20749999999999999',
                                             C1='0.001276',
                                             OutputWorkspace=_t_flux_name)
            _t_flux = Scale(_t_flux,
                            Factor='1e-06',
                            Operation='Multiply',
                            OutputWorkspace=_t_flux_name)
            _t_flux = Integration(_t_flux,
                                  RangeLower=self._wavelength_band[0],
                                  RangeUpper=self._wavelength_band[1],
                                  OutputWorkspace=_t_flux_name)
            w_nor = Divide(w, _t_flux, OutputWorkspace=w.name())
        else:
            aggregate_flux = None
            if self._flux_normalization_type == 'Proton Charge':
                aggregate_flux = w.getRun().getProtonCharge()
            elif self._flux_normalization_type == 'Duration':
                aggregate_flux = w.getRun().getProperty('duration').value
            w_nor = Scale(w,
                          Operation='Multiply',
                          Factor=1.0 / aggregate_flux,
                          OutputWorkspace=w.name())
        return w_nor
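A hedged sketch of the simplest branch above, normalization by integrated proton charge; the workspace and its charge log are synthetic (getProtonCharge reads the gd_prtn_chrg log):

from mantid.simpleapi import AddSampleLog, CreateSampleWorkspace, Scale

w = CreateSampleWorkspace(OutputWorkspace='data')
AddSampleLog(Workspace=w, LogName='gd_prtn_chrg', LogText='100.0', LogType='Number')
charge = w.getRun().getProtonCharge()  # integrated proton charge from the run logs
w = Scale(w, Operation='Multiply', Factor=1.0 / charge, OutputWorkspace=w.name())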