Example No. 1
    def _sumWorkspaces(self, workspaces, isTrans):
        """If there are multiple input workspaces, sum them and return the result. Otherwise
        just return the single input workspace, or None if the list is empty."""
        if len(workspaces) < 1:
            return None
        if len(workspaces) < 2:
            return workspaces[0]
        workspaces_without_prefixes = [
            self._removePrefix(ws, isTrans) for ws in workspaces
        ]
        concatenated_names = "+".join(workspaces_without_prefixes)
        summed_name = self._prefixedName(concatenated_names, isTrans)
        self.log().information('Summing workspaces ' + ", ".join(workspaces) +
                               ' into ' + summed_name)
        summed_ws = MergeRuns(InputWorkspaces=", ".join(workspaces),
                              OutputWorkspace=summed_name)
        # The reduction algorithm sets the output workspace names from the run number,
        # which by default is just the first run. Set it to the concatenated name,
        # e.g. 13461+13462.
        if isinstance(summed_ws, WorkspaceGroup):
            for workspaceName in summed_ws.getNames():
                grouped_ws = AnalysisDataService.retrieve(workspaceName)
                grouped_ws.run().addProperty('run_number', concatenated_names,
                                             True)
        else:
            summed_ws.run().addProperty('run_number', concatenated_names, True)
        return summed_name
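
As a rough illustration of the run-number handling above (hypothetical names; _removePrefix and _prefixedName are assumed to strip and re-add a prefix such as 'TRANS_'):

# Illustrative sketch only: after summing TRANS_13461 and TRANS_13462, the
# merged workspace is named TRANS_13461+13462 and its 'run_number' sample log
# holds the concatenated run numbers instead of just the first run's.
from mantid.api import AnalysisDataService
summed = AnalysisDataService.retrieve('TRANS_13461+13462')
print(summed.run().getProperty('run_number').value)  # -> '13461+13462'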
Example No. 2
def sum_regular_runs(workspace_names):
    """
    Sum runs with single workspace data.

    @param workspace_names List of names of input workspaces
    @return List of names of workspaces
    """
    from mantid import mtd
    from mantid.kernel import logger
    from mantid.simpleapi import (MergeRuns, Scale, AddSampleLog,
                                  DeleteWorkspace)

    # Use the first workspace name as the result of summation
    summed_detector_ws_name = workspace_names[0]
    summed_monitor_ws_name = workspace_names[0] + '_mon'

    # Get a list of the run numbers for the original data
    run_numbers = ','.join(
        [str(mtd[ws_name].getRunNumber()) for ws_name in workspace_names])

    # Generate lists of the detector and monitor workspaces
    detector_workspaces = ','.join(workspace_names)
    monitor_workspaces = ','.join(
        [ws_name + '_mon' for ws_name in workspace_names])

    # Merge the raw workspaces
    MergeRuns(InputWorkspaces=detector_workspaces,
              OutputWorkspace=summed_detector_ws_name)
    MergeRuns(InputWorkspaces=monitor_workspaces,
              OutputWorkspace=summed_monitor_ws_name)

    # Delete old workspaces
    for idx in range(1, len(workspace_names)):
        DeleteWorkspace(workspace_names[idx])
        DeleteWorkspace(workspace_names[idx] + '_mon')

    # Derive the scale factor based on number of merged workspaces
    scale_factor = 1.0 / len(workspace_names)
    logger.information('Scale factor for summed workspaces: %f' % scale_factor)

    # Scale the new detector and monitor workspaces
    Scale(InputWorkspace=summed_detector_ws_name,
          OutputWorkspace=summed_detector_ws_name,
          Factor=scale_factor)
    Scale(InputWorkspace=summed_monitor_ws_name,
          OutputWorkspace=summed_monitor_ws_name,
          Factor=scale_factor)

    # Add the list of run numbers to the result workspace as a sample log
    AddSampleLog(Workspace=summed_detector_ws_name,
                 LogName='multi_run_numbers',
                 LogType='String',
                 LogText=run_numbers)

    # Only have the one workspace now
    return [summed_detector_ws_name]
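
A minimal usage sketch (assumes a Mantid session; the names are hypothetical and the dummy workspaces stand in for real runs with matching '_mon' monitor workspaces):

from mantid.simpleapi import CreateSampleWorkspace

# Build dummy detector and monitor workspaces purely for illustration.
for name in ('run1', 'run2'):
    CreateSampleWorkspace(OutputWorkspace=name)
    CreateSampleWorkspace(OutputWorkspace=name + '_mon')

# Detectors are merged into 'run1', monitors into 'run1_mon'; 'run2' is deleted.
print(sum_regular_runs(['run1', 'run2']))  # -> ['run1']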
Example No. 3
def sum_chopped_runs(workspace_names):
    """
    Sum runs with chopped data.

    @param workspace_names List of names of input workspaces
    @return List of names of workspaces
    """
    from mantid import mtd
    from mantid.simpleapi import (MergeRuns, Scale, DeleteWorkspace)

    try:
        num_merges = len(mtd[workspace_names[0]].getNames())
    except (KeyError, AttributeError):
        raise RuntimeError('Not all runs have been chopped, cannot sum.')

    merges = list()

    # Generate a list of workspaces to be merged
    for idx in range(0, num_merges):
        merges.append({'detector': list(), 'monitor': list()})

        for ws_name in workspace_names:
            detector_ws_name = mtd[ws_name].getNames()[idx]
            monitor_ws_name = detector_ws_name + '_mon'

            merges[idx]['detector'].append(detector_ws_name)
            merges[idx]['monitor'].append(monitor_ws_name)

    for merge in merges:
        # Merge the chopped run segments
        MergeRuns(InputWorkspaces=','.join(merge['detector']),
                  OutputWorkspace=merge['detector'][0])
        MergeRuns(InputWorkspaces=','.join(merge['monitor']),
                  OutputWorkspace=merge['monitor'][0])

        # Scale the merged runs
        merge_size = len(merge['detector'])
        factor = 1.0 / merge_size
        Scale(InputWorkspace=merge['detector'][0],
              OutputWorkspace=merge['detector'][0],
              Factor=factor,
              Operation='Multiply')
        Scale(InputWorkspace=merge['monitor'][0],
              OutputWorkspace=merge['monitor'][0],
              Factor=factor,
              Operation='Multiply')

        # Remove the old workspaces
        for idx in range(1, merge_size):
            DeleteWorkspace(merge['detector'][idx])
            DeleteWorkspace(merge['monitor'][idx])

    # Only have the one workspace now
    return [workspace_names[0]]
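
This variant expects each input name to refer to a WorkspaceGroup of chopped segments with per-segment '_mon' monitors. A usage sketch with hypothetical names:

from mantid.simpleapi import CreateSampleWorkspace, GroupWorkspaces

# Two chopped runs, each with two segments plus per-segment monitors.
for seg in ('run1_1', 'run1_2', 'run2_1', 'run2_2'):
    CreateSampleWorkspace(OutputWorkspace=seg)
    CreateSampleWorkspace(OutputWorkspace=seg + '_mon')
GroupWorkspaces(InputWorkspaces='run1_1,run1_2', OutputWorkspace='run1')
GroupWorkspaces(InputWorkspaces='run2_1,run2_2', OutputWorkspace='run2')

# Segment i of every run is merged into segment i of the first run.
print(sum_chopped_runs(['run1', 'run2']))  # -> ['run1']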
Example No. 4
def fold_chopped(workspace_name):
    """
    Folds multiple frames of a data set into one workspace.

    @param workspace_name Name of the group to fold
    """
    from mantid import mtd
    from mantid.simpleapi import (MergeRuns, DeleteWorkspace, CreateWorkspace,
                                  Divide)

    workspaces = mtd[workspace_name].getNames()
    merged_ws = workspace_name + '_merged'
    MergeRuns(InputWorkspaces=','.join(workspaces), OutputWorkspace=merged_ws)

    scaling_ws = '__scaling_ws'
    unit = mtd[workspace_name].getItem(0).getAxis(0).getUnit().unitID()

    ranges = []
    for ws in workspaces:
        x_min = mtd[ws].dataX(0)[0]
        x_max = mtd[ws].dataX(0)[-1]
        ranges.append((x_min, x_max))
        DeleteWorkspace(Workspace=ws)

    data_x = mtd[merged_ws].readX(0)
    data_y = []
    data_e = []

    for i in range(0, mtd[merged_ws].blocksize()):
        y_val = 0.0
        for rng in ranges:
            if rng[0] <= data_x[i] <= rng[1]:
                y_val += 1.0

        data_y.append(y_val)
        data_e.append(0.0)

    CreateWorkspace(OutputWorkspace=scaling_ws,
                    DataX=data_x,
                    DataY=data_y,
                    DataE=data_e,
                    UnitX=unit)

    Divide(LHSWorkspace=merged_ws,
           RHSWorkspace=scaling_ws,
           OutputWorkspace=workspace_name)

    DeleteWorkspace(Workspace=merged_ws)
    DeleteWorkspace(Workspace=scaling_ws)
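
The scaling workspace simply counts how many frames cover each point, so the final Divide averages the overlap regions. A standalone illustration of that counting (hypothetical numbers):

# Frames covering (0, 10) and (5, 15): points in the overlap count twice.
ranges = [(0.0, 10.0), (5.0, 15.0)]
data_x = [2.0, 7.0, 12.0]
data_y = [sum(1.0 for lo, hi in ranges if lo <= x <= hi) for x in data_x]
print(data_y)  # -> [1.0, 2.0, 1.0]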
Example No. 5
def _loadFiles(inputFilenames, wsNames, wsCleanup, algorithmLogging):
    """Load files specified by inputFilenames, merging them into a single workspace."""
    filename = inputFilenames.pop(0)
    runNumber = os.path.basename(filename).split('.')[0]
    firstWSName = wsNames.withSuffix('raw-' + runNumber)
    mergedWS = LoadILLTOF(Filename=filename,
                          OutputWorkspace=firstWSName,
                          EnableLogging=algorithmLogging)
    mergedWSName = wsNames.withSuffix('merged')
    for i, filename in enumerate(inputFilenames):
        runNumber = os.path.basename(filename).split('.')[0]
        rawWSName = wsNames.withSuffix('raw-' + runNumber)
        rawWS = LoadILLTOF(Filename=filename,
                           OutputWorkspace=rawWSName,
                           EnableLogging=algorithmLogging)
        mergedWS = MergeRuns(InputWorkspaces=[mergedWS, rawWS],
                             OutputWorkspace=mergedWSName,
                             EnableLogging=algorithmLogging)
        if i == 0:
            wsCleanup.cleanup(firstWSName)
        wsCleanup.cleanup(rawWS)
    return mergedWS
Example No. 6
    def PyExec(self):
        """
        Execute the algorithm in diffraction-only mode
        """

        # Load all sample, vanadium files
        ipf_file_name = 'OSIRIS_diffraction_diffonly_Parameters.xml'
        load_opts = {"DeleteMonitors": True}
        load_runs = create_loader(ipf_file_name, self._spec_min,
                                  self._spec_max, self._load_logs, load_opts)
        drange_map_generator = create_drange_map_generator(
            load_runs, rebin_and_average)

        # Create a sample drange map from the sample runs
        self._sam_ws_map = drange_map_generator(self._sample_runs,
                                                lambda name: mtd[name])

        # Create a vanadium drange map from the vanadium runs
        self._van_ws_map = drange_map_generator(self._vanadium_runs,
                                                lambda name: mtd[name])

        # Load the container run
        if self._container_files:
            # Scale the container run if required
            if self._container_scale_factor != 1.0:
                self._con_ws_map = drange_map_generator(
                    self._container_files,
                    lambda name: mtd[name] * self._container_scale_factor)
            else:
                self._con_ws_map = drange_map_generator(
                    self._container_files, lambda name: mtd[name])

            result_map = self._sam_ws_map.combine(self._con_ws_map,
                                                  rebin_and_subtract)
        else:
            result_map = self._sam_ws_map

        calibrator = diffraction_calibrator(self._cal)

        # Calibrate the Sample workspaces.
        result_map.transform(calibrator)

        # Calibrate the Vanadium workspaces.
        self._van_ws_map.transform(calibrator)

        # Divide all sample files by the corresponding vanadium files.
        result_map = result_map.combine(self._van_ws_map, divide_workspace)

        if len(result_map) > 1:
            # Workspaces must be added to the ADS, as there does not yet exist
            # a workspace list property (must be passed to merge runs by name).
            temp_ws_names = [
                "__run_" + str(idx) for idx in range(len(result_map))
            ]
            for temp_ws_name, sample_ws in zip(temp_ws_names,
                                               result_map.values()):
                mtd.addOrReplace(temp_ws_name, sample_ws)

            # Merge the sample files into one.
            output_ws = MergeRuns(InputWorkspaces=temp_ws_names,
                                  OutputWorkspace="merged_sample_runs",
                                  StoreInADS=False,
                                  EnableLogging=False)
            delete_workspaces(temp_ws_names)
        elif len(result_map) == 1:
            output_ws = list(result_map.values())[0]
        else:
            error_msg = "D-Ranges found in runs have no overlap:\n" + \
                        "Found Sample D-Ranges: " + ", ".join(map(str, self._sam_ws_map.keys())) + "\n" + \
                        "Found Vanadium D-Ranges: " + ", ".join(map(str, self._van_ws_map.keys()))

            if self._container_files:
                error_msg += "\nFound Container D-Ranges: " + ", ".join(
                    map(str, self._con_ws_map.keys()))
            raise RuntimeError(error_msg)

        if self._output_ws_name:
            mtd.addOrReplace(self._output_ws_name, output_ws)

        d_ranges = sorted(result_map.keys())
        AddSampleLog(Workspace=output_ws,
                     LogName="D-Ranges",
                     LogText=", ".join(map(str, d_ranges)))

        # Create scaling data to compensate where the merge has combined overlapping data.
        intersections = get_intersection_of_ranges(d_ranges)

        data_x = output_ws.dataX(0)
        data_y = []
        data_e = []
        for i in range(0, len(data_x) - 1):
            x_val = (data_x[i] + data_x[i + 1]) / 2.0

            if is_in_ranges(intersections, x_val):
                data_y.append(2)
                data_e.append(2)
            else:
                data_y.append(1)
                data_e.append(1)

        # Apply the scaling data to the result workspace.
        for i in range(0, output_ws.getNumberHistograms()):
            result_y = output_ws.dataY(i)
            result_e = output_ws.dataE(i)

            result_y = result_y / data_y
            result_e = result_e / data_e

            output_ws.setY(i, result_y)
            output_ws.setE(i, result_e)

        self.setProperty("OutputWorkspace", output_ws)
Example No. 8
    def PyExec(self):
        runs = self.getProperty('Filename').value
        runs_as_str = self.getPropertyValue('Filename')
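        # Each ',' in the runs string separates an entry of the output group and
        # each '+' a run to be summed, hence files to load = commas + pluses + 1.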
        number_runs = runs_as_str.count(',') + runs_as_str.count('+') + 1
        self._progress = Progress(self, start=0.0, end=1.0, nreports=number_runs)
        self._loader = self.getPropertyValue('LoaderName')
        self._version = self.getProperty('LoaderVersion').value
        self._loader_options = self.getProperty('LoaderOptions').value
        merge_options = self.getProperty('MergeRunsOptions').value
        output = self.getPropertyValue('OutputWorkspace')
        self._prefix = ''
        if output.startswith('__'):
            self._prefix = '__'

        # get the first run
        to_group = []
        first_run = runs[0]
        if isinstance(first_run, list):
            first_run = first_run[0]

        if self._loader == 'Load':
            # figure out the winning loader
            winning_loader = FileLoaderRegistry.Instance().chooseLoader(first_run)
            self._loader = winning_loader.name()
            self._version = winning_loader.version()
            self.setPropertyValue('LoaderName', self._loader)
            self.setProperty('LoaderVersion', self._version)

        for runs_to_sum in runs:
            if not isinstance(runs_to_sum, list):
                run = runs_to_sum
                runnumber = self._prefix + os.path.basename(run).split('.')[0]
                self._load(run, runnumber)
                to_group.append(runnumber)
            else:
                runnumbers = self._prefix
                merged = ''
                for i, run in enumerate(runs_to_sum):
                    runnumber = os.path.basename(run).split('.')[0]
                    runnumbers += '_' + runnumber
                    runnumber = self._prefix + runnumber
                    self._load(run, runnumber)
                    if i == 0:
                        merged = runnumber
                    else:
                        # We need to merge to a temp name and rename later,
                        # since if the merged workspace is a group,
                        # its items would be orphaned.
                        tmp_merged = '__tmp_' + merged
                        MergeRuns(InputWorkspaces=[merged, runnumber],
                                  OutputWorkspace=tmp_merged, **merge_options)
                        DeleteWorkspace(Workspace=runnumber)
                        DeleteWorkspace(Workspace=merged)
                        RenameWorkspace(InputWorkspace=tmp_merged, OutputWorkspace=merged)

                runnumbers = runnumbers[1:]
                RenameWorkspace(InputWorkspace=merged, OutputWorkspace=runnumbers)
                to_group.append(runnumbers)

        if len(to_group) != 1:
            GroupWorkspaces(InputWorkspaces=to_group, OutputWorkspace=output)
        else:
            RenameWorkspace(InputWorkspace=to_group[0], OutputWorkspace=output)

        self.setProperty('OutputWorkspace', mtd[output])
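
A hypothetical invocation, assuming this PyExec belongs to a load-and-merge style algorithm (called LoadAndMerge here purely for illustration):

# '+' sums runs via MergeRuns; ',' separates entries of the output group:
# LoadAndMerge(Filename='run0001.nxs+run0002.nxs,run0003.nxs',
#              OutputWorkspace='grouped')
# -> 'grouped' holds the 0001+0002 sum and 0003 as separate group items.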
Example No. 9
    def _inputWS(self):
        """Return a raw input workspace and beam position table as tuple."""
        inputFiles = self.getProperty(Prop.RUN).value
        if len(inputFiles) > 0:
            flattened = list()
            for f in inputFiles:
                # Flatten input files into a single list
                if isinstance(f, str):
                    flattened.append(f)
                else:
                    # f is a list; concatenate.
                    flattened += f
            beamCentre = self.getProperty(Prop.BEAM_CENTRE).value
            beamAngle = self.getProperty(Prop.BEAM_ANGLE).value
            dbPosWS = ''
            if not self.getProperty(Prop.DIRECT_BEAM_POS_WS).isDefault:
                dbPosWS = self.getProperty(Prop.DIRECT_BEAM_POS_WS).value
            filename = flattened.pop(0)
            numor = os.path.basename(filename).split('.')[0]
            firstWSName = self._names.withSuffix('raw-' + numor)
            beamPosWSName = self._names.withSuffix('beamPos-' + numor)
            LoadILLReflectometry(Filename=filename,
                                 OutputWorkspace=firstWSName,
                                 DirectBeamPosition=dbPosWS,
                                 BeamCentre=beamCentre,
                                 BraggAngle=beamAngle,
                                 XUnit='TimeOfFlight',
                                 OutputBeamPosition=beamPosWSName,
                                 EnableLogging=self._subalgLogging)
            mergedWS = mtd[firstWSName]
            beamPosWS = mtd[beamPosWSName]
            self._cleanup.cleanupLater(beamPosWS)
            mergedWSName = self._names.withSuffix('merged')
            for i, filename in enumerate(flattened):
                numor = os.path.basename(filename).split('.')[0]
                rawWSName = self._names.withSuffix('raw-' + numor)
                LoadILLReflectometry(Filename=filename,
                                     OutputWorkspace=rawWSName,
                                     DirectBeamPosition=dbPosWS,
                                     XUnit='TimeOfFlight',
                                     EnableLogging=self._subalgLogging)
                rawWS = mtd[rawWSName]
                mergedWS = MergeRuns(InputWorkspaces=[mergedWS, rawWS],
                                     OutputWorkspace=mergedWSName,
                                     EnableLogging=self._subalgLogging)
                if i == 0:
                    self._cleanup.cleanup(firstWSName)
                self._cleanup.cleanup(rawWS)
            return mergedWS, beamPosWS
        ws = self.getProperty(Prop.INPUT_WS).value
        self._cleanup.protect(ws)
        if not self.getProperty(Prop.BEAM_POS_WS).isDefault:
            beamPosWS = self.getProperty(Prop.BEAM_POS_WS).value
            self._cleanup.protect(beamPosWS)
        else:
            if not self.getProperty(Prop.BEAM_CENTRE).isDefault:
                peakPos = self.getProperty(Prop.BEAM_CENTRE).value
                beamPosWS = self._createFakePeakPositionTable(peakPos)
            else:
                # Beam position will be fitted later.
                beamPosWS = None
        return ws, beamPosWS
Example No. 10
    def PyExec(self):
        """
        Execute the algorithm in diffraction-only mode
        """

        # Load all sample, vanadium files
        ipf_file_name = 'OSIRIS_diffraction_diffonly_Parameters.xml'
        load_opts = {"DeleteMonitors": True}

        sample_ws_names, _ = load_files(self._sample_runs,
                                        ipf_file_name,
                                        self._spec_min,
                                        self._spec_max,
                                        load_logs=self._load_logs,
                                        load_opts=load_opts)
        # Add the sample workspaces to the sample d-range map
        self._sam_ws_map.add_workspaces(
            [mtd[sample_ws_name] for sample_ws_name in sample_ws_names],
            rebin_and_average)

        vanadium_ws_names, _ = load_files(self._vanadium_runs,
                                          ipf_file_name,
                                          self._spec_min,
                                          self._spec_max,
                                          load_logs=self._load_logs,
                                          load_opts=load_opts)
        # Add the vanadium workspaces to the vanadium drange map
        self._van_ws_map.add_workspaces(
            [mtd[vanadium_ws_name] for vanadium_ws_name in vanadium_ws_names],
            rebin_and_average)

        # Load the container run
        if self._container_files:
            container_ws_names, _ = load_files(self._container_files,
                                               ipf_file_name,
                                               self._spec_min,
                                               self._spec_max,
                                               load_logs=self._load_logs,
                                               load_opts=load_opts)

            # Scale the container run if required
            if self._container_scale_factor != 1.0:
                self._con_ws_map.add_workspaces([
                    mtd[container_ws_name] * self._container_scale_factor
                    for container_ws_name in container_ws_names
                ], rebin_and_average)
            else:
                self._con_ws_map.add_workspaces([
                    mtd[container_ws_name]
                    for container_ws_name in container_ws_names
                ], rebin_and_average)

            result_map = self._sam_ws_map.combine(self._con_ws_map,
                                                  rebin_and_subtract)
            self._delete_workspaces(container_ws_names)
        else:
            result_map = self._sam_ws_map

        # Run necessary algorithms on the Sample workspaces.
        self._calibrate_runs_in_map(result_map)

        # Run necessary algorithms on the Vanadium workspaces.
        self._calibrate_runs_in_map(self._van_ws_map)

        # Divide all sample files by the corresponding vanadium files.
        result_map = result_map.combine(self._van_ws_map, divide_workspace)

        # The workspaces now held in the vanadium map are no longer those in
        # the ADS, so the vanadium workspaces in the ADS can safely be deleted.
        self._delete_workspaces(vanadium_ws_names)

        # Likewise, the workspaces held in the sample map are no longer those
        # in the ADS, so the sample workspaces in the ADS can safely be deleted.
        self._delete_workspaces(sample_ws_names)

        if len(result_map) > 1:
            # Workspaces must be added to the ADS, as there does not yet exist
            # a workspace list property (must be passed to merge runs by name).
            for sample_ws_name, sample_ws in zip(sample_ws_names,
                                                 result_map.values()):
                mtd.addOrReplace(sample_ws_name, sample_ws)

            # Merge the sample files into one.
            output_ws = MergeRuns(InputWorkspaces=sample_ws_names,
                                  OutputWorkspace="merged_sample_runs",
                                  StoreInADS=False,
                                  EnableLogging=False)
            self._delete_workspaces(sample_ws_names)
        elif len(result_map) == 1:
            output_ws = list(result_map.values())[0]
        else:
            logger.error("D-Ranges found in runs have no overlap:\n" +
                         "Found Sample D-Ranges: " +
                         ", ".join(map(str, self._sam_ws_map.keys())) + "\n" +
                         "Found Container D-Ranges: " +
                         ", ".join(map(str, self._con_ws_map.keys())) + "\n" +
                         "Found Vanadium D-Ranges: " +
                         ", ".join(map(str, self._van_ws_map.keys())))
            return

        if self._output_ws_name:
            mtd.addOrReplace(self._output_ws_name, output_ws)

        d_ranges = result_map.keys()
        AddSampleLog(Workspace=output_ws,
                     LogName="D-Ranges",
                     LogText="D-Ranges used for reduction: " +
                     ", ".join(map(str, d_ranges)))

        # Create scaling data to compensate where the merge has combined overlapping data.
        intersections = get_intersection_of_ranges(d_ranges)

        data_x = output_ws.dataX(0)
        data_y = []
        data_e = []
        for i in range(0, len(data_x) - 1):
            x_val = (data_x[i] + data_x[i + 1]) / 2.0

            if is_in_ranges(intersections, x_val):
                data_y.append(2)
                data_e.append(2)
            else:
                data_y.append(1)
                data_e.append(1)

        # Apply the scaling data to the result workspace.
        for i in range(0, output_ws.getNumberHistograms()):
            result_y = output_ws.dataY(i)
            result_e = output_ws.dataE(i)

            result_y = result_y / data_y
            result_e = result_e / data_e

            output_ws.setY(i, result_y)
            output_ws.setE(i, result_e)

        self.setProperty("OutputWorkspace", output_ws)