Example #1
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)`, that is called with the current
            percentage progress of the reduction.
        """

        # refnx.reduce.reduce requires the current working directory to be
        # the directory that output files are written to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = "./"

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't specified then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            streamed_directory = data_directory
            self.streamed_directory = streamed_directory

        logging.info("-------------------------------------------------------"
                     "\nStarting reduction run")
        logging.info(
            "data_folder={data_directory}, trim_trailing=True, "
            "lo_wavelength={low_wavelength}, "
            "hi_wavelength={high_wavelength}, "
            "rebin_percent={rebin_percent}, "
            "normalise={monitor_normalisation}, "
            "background={background_subtraction} "
            "eventmode={streamed_reduction} "
            "event_folder={streamed_directory}".format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start, self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # is manual beam finding being used?
        peak_pos = None
        if self.manual_beam_find and self.manual_beam_finder is not None:
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val["use"]:
                continue

            flood = None
            if val["flood"]:
                flood = full_path(val["flood"])

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(
                ["reflect-1", "reflect-2", "reflect-3"],
                ["direct-1", "direct-2", "direct-3"],
            ):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if (not os.path.isfile(full_path(reflect))) or (
                        not os.path.isfile(full_path(direct))):
                    continue

                # open the reflected beam file
                ref_pn = PlatypusNexus(full_path(reflect))

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct, data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                try:
                    reduced = reducer(
                        ref_pn,
                        scale=val["scale"],
                        h5norm=flood,
                        lo_wavelength=self.low_wavelength,
                        hi_wavelength=self.high_wavelength,
                        rebin_percent=self.rebin_percent,
                        normalise=self.monitor_normalisation,
                        background=self.background_subtraction,
                        manual_beam_find=self.manual_beam_finder,
                        peak_pos=peak_pos,
                        eventmode=eventmode,
                        event_folder=streamed_directory,
                    )
                except Exception as e:
                    # a typical Exception would be ValueError for
                    # non-overlapping angles
                    logging.info(e)
                    continue

                logging.info("Reduced {} vs {}, scale={}, angle={}".format(
                    reflect,
                    direct,
                    val["scale"],
                    reduced[1]["omega"][0, 0],
                ))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             "c_{0}.dat".format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             "c_{0}.xml".format(fname))

                try:
                    combined_dataset.add_data(
                        reducer.data(),
                        requires_splice=True,
                        trim_trailing=True,
                    )
                except ValueError as e:
                    # datasets don't overlap
                    logging.info(e)
                    continue

            if combined_dataset is not None:
                # after you've finished reducing, write the combined files.
                with open(fname_dat, "wb") as f:
                    combined_dataset.save(f)
                with open(fname_xml, "wb") as f:
                    combined_dataset.save_xml(f)
                logging.info("Written combined files: {} and {}".format(
                    fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info("\nFinished reduction run"
                     "-------------------------------------------------------")
Example #2
def reduce_stitch(reflect_list,
                  direct_list,
                  background_list=None,
                  norm_file_num=None,
                  data_folder=None,
                  prefix='PLP',
                  trim_trailing=True,
                  save=True,
                  **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    background_list : list, optional
        List of `bool` to control whether background subtraction is used
        for each reduction, e.g. `[False, True, True]`. The default is to do
        a background subtraction on all runs.
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Path to the directory containing the raw data.
    prefix : str, optional
        The instrument filename prefix.
    trim_trailing : bool, optional
        When datasets are spliced together, points in the overlap region are
        removed from the preceding dataset if this is `True`.
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.

    Notes
    -----
    If `background` is in the supplied `kwds` it is ignored.
    The `prefix` is used to map run numbers to filenames.
    For example, a run number of 10 and a prefix of `PLP` resolve to the
    NeXus filename 'PLP0000010.nx.hdf'.

    Examples
    --------

    >>> from refnx.reduce import reduce_stitch
    >>> dataset, fname = reduce_stitch([708, 709, 710],
    ...                                [711, 711, 711],
    ...                                rebin_percent=2)

    """

    scale = kwds.get('scale', 1.)

    kwds_copy = {}
    kwds_copy.update(kwds)
    kwds_copy.pop('background', None)

    if not background_list:
        background_list = [True] * len(reflect_list)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list, background_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num, prefix=prefix)
        kwds_copy['h5norm'] = norm_datafile

    if prefix == 'PLP':
        reducer_klass = PlatypusReduce
    else:
        raise ValueError("Incorrect prefix specified")

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0], prefix=prefix))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1], prefix=prefix))

        reducer = reducer_klass(direct_datafile)
        datasets, fnames = reducer.reduce(reflect_datafile,
                                          save=save,
                                          background=val[2],
                                          **kwds_copy)

        if not index:
            datasets[0].scale(scale)

        combined_dataset.add_data(datasets[0].data,
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None

    if save:
        # this will give us <fname>.nx.hdf
        # e.g. if reflect_list[0] was 708 you'll get PLP0000708.nx.hdf
        fname = number_datafile(reflect_list[0], prefix=prefix)
        # now chop off .nx.hdf extension
        fname = basename_datafile(fname)

        fname_dat = 'c_{0}.dat'.format(fname)
        with open(fname_dat, 'wb') as f:
            combined_dataset.save(f)
        fname_xml = 'c_{0}.xml'.format(fname)
        with open(fname_xml, 'wb') as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname_dat
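
A hedged usage sketch for this variant, exercising the per-run background
control and the flood-field normalisation run described in the docstring.
The run numbers are placeholders, not real datasets.

from refnx.reduce import reduce_stitch

# three angles against the same direct beam; skip background subtraction
# for the first run only, and flood-normalise with run 712
dataset, fname = reduce_stitch([708, 709, 710],
                               [711, 711, 711],
                               background_list=[False, True, True],
                               norm_file_num=712,
                               rebin_percent=2)
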
Example #3
def reduce_stitch(reflect_list, direct_list, norm_file_num=None,
                  data_folder=None, trim_trailing=True, save=True, **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Path to the directory containing the raw data.
    trim_trailing : bool, optional
        When datasets are spliced together, points in the overlap region are
        removed from the preceding dataset if this is `True`.
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.
    """
    scale = kwds.get('scale', 1.)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num)
        kwds['h5norm'] = norm_datafile

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0]))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1]))

        reduced = ReducePlatypus(direct_datafile,
                                 reflect=reflect_datafile,
                                 save=save,
                                 **kwds)
        if not index:
            reduced.scale(scale)

        combined_dataset.add_data(reduced.data(),
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname = None
    if save:
        fname = 'c_PLP{0:07d}.dat'.format(reflect_list[0])
        with open(fname, 'wb') as f:
            combined_dataset.save(f)
        fname_xml = 'c_PLP{0:07d}.xml'.format(reflect_list[0])
        with open(fname_xml, 'wb') as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname
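
This older variant has no Examples section in its docstring; a minimal,
hedged usage sketch follows (placeholder run numbers, and `data_folder` is
assumed to hold the PLP*.nx.hdf files).

from refnx.reduce import reduce_stitch

dataset, fname = reduce_stitch([708, 709, 710],
                               [711, 711, 711],
                               norm_file_num=712,
                               data_folder='./raw_data',
                               rebin_percent=3)
# `dataset` is a refnx.dataset.ReflectDataset; dataset.x holds Q and
# dataset.y the spliced reflectivity
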
Example #4
    def _reduce_single_angle(self, scale=1):
        """
        Reduce a single angle.
        """
        n_spectra = self.reflected_beam.n_spectra
        n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
        n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

        # calculate omega and two_theta depending on the mode.
        mode = self.reflected_beam.mode

        # we'll need the wavelengths to calculate Q.
        wavelengths = self.reflected_beam.m_lambda
        m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

        if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
            detector_z_difference = (self.reflected_beam.detector_z -
                                     self.direct_beam.detector_z)
            beampos_z_difference = (self.reflected_beam.m_beampos
                                    - self.direct_beam.m_beampos)

            total_z_deflection = (detector_z_difference
                                  + beampos_z_difference * Y_PIXEL_SPACING)

            # omega_nom.shape = (N, )
            omega_nom = np.degrees(np.arctan(total_z_deflection
                                   / self.reflected_beam.detector_y) / 2.)

            '''
            Wavelength specific angle of incidence correction
            This involves:
            1) working out the trajectory of the neutrons through the
            collimation system.
            2) where those neutrons intersect the sample.
            3) working out the elevation of the neutrons when they hit the
            sample.
            4) correcting the angle of incidence.
            '''
            speeds = general.wavelength_velocity(wavelengths)
            collimation_distance = self.reflected_beam.cat.collimation_distance
            s2_sample_distance = (self.reflected_beam.cat.sample_distance
                                  - self.reflected_beam.cat.slit2_distance)

            # work out the trajectories of the neutrons for them to pass
            # through the collimation system.
            trajectories = pm.find_trajectory(collimation_distance / 1000.,
                                              0, speeds)
            
            # work out where the beam hits the sample
            res = pm.parabola_line_intersection_point(s2_sample_distance / 1000,
                                                      0,
                                                      trajectories,
                                                      speeds,
                                                      omega_nom[:, np.newaxis])
            intersect_x, intersect_y, x_prime, elevation = res

            # correct the angle of incidence with a wavelength dependent
            # elevation.
            omega_corrected = omega_nom[:, np.newaxis] - elevation

        elif mode in ['SB', 'DB']:
            omega = self.reflected_beam.m_beampos + self.reflected_beam.detector_z[:, np.newaxis]
            omega -= self.direct_beam.m_beampos + self.direct_beam.detector_z
            omega /= 2 * self.reflected_beam.detector_y[:, np.newaxis, np.newaxis]
            omega = np.arctan(omega)

            m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :] * Y_PIXEL_SPACING
            m_twotheta += self.reflected_beam.detector_z[:, np.newaxis, np.newaxis]
            m_twotheta -= self.direct_beam.m_beampos[:, :, np.newaxis] + self.direct_beam.detector_z
            m_twotheta -= self.reflected_beam.detector_y[:, np.newaxis, np.newaxis] * np.tan(omega[:, :, np.newaxis])

            m_twotheta /= self.reflected_beam.detector_y[:, np.newaxis, np.newaxis]
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta += omega[:, :, np.newaxis]

            # the rest of the reduction (the Q calculation below) works with
            # angles in degrees and uses omega_corrected, so define it for
            # this mode too
            omega_corrected = np.degrees(omega)
            m_twotheta = np.degrees(m_twotheta)

        '''
        --Specular Reflectivity--
        Use the (constant wavelength) spectra that have already been
        integrated over 2theta (in processnexus) to calculate the specular
        reflectivity. Beware: m_topandtail has already been divided through
        by the monitor counts and had its errors propagated (at the end of
        processnexus), so the 2theta pixels are correlated to some degree.
        If we used the 2D array to calculate the reflectivity
        (sum {Iref_{2theta, lambda}} / I_direct_{lambda}) the error bars on
        the reflectivity would turn out much larger than they should be.
        '''
        ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                                   self.reflected_beam.m_spec_sd,
                                   self.direct_beam.m_spec,
                                   self.direct_beam.m_spec_sd)

        # calculate the 1D Qz values.
        xdata = general.q(omega_corrected, wavelengths)
        xdata_sd = (self.reflected_beam.m_lambda_fwhm
                    / self.reflected_beam.m_lambda) ** 2
        xdata_sd += (self.reflected_beam.domega[:, np.newaxis]
                     / omega_corrected) ** 2
        xdata_sd = np.sqrt(xdata_sd) * xdata

        '''
        ---Offspecular reflectivity---
        Normalise the counts in the reflected beam by the direct beam
        spectrum; this gives a reflectivity. Also propagate the errors,
        leaving the fractional variance (dr/r)^2.
        --Note-- that adjacent y-pixels (same wavelength) are correlated in
        this treatment, so you can't just sum over them: the uncertainty of
        (c_0 / d) + ... + (c_n / d) is not what you would obtain by
        propagating (c_0 + ... + c_n) / d with independent errors.
        '''
        m_ref, m_ref_sd = EP.EPdiv(self.reflected_beam.m_topandtail,
                                   self.reflected_beam.m_topandtail_sd,
                                   self.direct_beam.m_spec[:, :, np.newaxis],
                                   self.direct_beam.m_spec_sd[:, :, np.newaxis])

        # you may have had divide-by-zeros.
        m_ref = np.where(np.isinf(m_ref), 0, m_ref)
        m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

        # calculate the Q values for the detector pixels. Each pixel has a
        # different 2theta and a different wavelength; ASSUME that they all
        # have the same angle of incidence
        qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis],
                                m_twotheta,
                                0,
                                wavelengths[:, :, np.newaxis])

        reduction = {}
        reduction['xdata'] = self.xdata = xdata
        reduction['xdata_sd'] = self.xdata_sd = xdata_sd
        reduction['ydata'] = self.ydata = ydata
        reduction['ydata_sd'] = self.ydata_sd = ydata_sd
        reduction['m_ref'] = self.m_ref = m_ref
        reduction['m_ref_sd'] = self.m_ref_sd = m_ref_sd
        reduction['qz'] = self.m_qz = qz
        reduction['qy'] = self.m_qy = qy
        reduction['nspectra'] = self.n_spectra = n_spectra
        reduction['datafile_number'] = self.datafile_number = (
            self.reflected_beam.datafile_number)

        fnames = []
        if self.save:
            for i in range(n_spectra):
                data_tup = self.data(scanpoint=i)
                dataset = ReflectDataset(data_tup)
                fname = 'PLP{0:07d}_{1}.dat'.format(self.datafile_number, i)
                fnames.append(fname)
                with open(fname, 'wb') as f:
                    dataset.save(f)
                fname = 'PLP{0:07d}_{1}.xml'.format(self.datafile_number, i)
                with open(fname, 'wb') as f:
                    dataset.save_xml(f)

        reduction['fname'] = fnames
        return deepcopy(reduction)
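
For reference, the 1D Q values and resolution computed above follow the
standard reflectometry relations; below is a small standalone sketch of what
`general.q` presumably evaluates, mirroring the quadrature sum used for
`xdata_sd`. It is illustrative only, not the refnx implementation.

import numpy as np

def qz(omega_deg, wavelength):
    # momentum transfer Q = 4 * pi * sin(omega) / lambda, omega in degrees,
    # wavelength in Angstrom
    return 4.0 * np.pi * np.sin(np.radians(omega_deg)) / wavelength

def dqz(q, omega_deg, d_omega_deg, wavelength, d_wavelength):
    # fractional angular and wavelength resolutions added in quadrature
    return q * np.sqrt((d_wavelength / wavelength) ** 2
                       + (d_omega_deg / omega_deg) ** 2)

# e.g. omega = 0.65 deg at lambda = 10 A gives Q ~ 0.0143 A**-1
print(qz(0.65, 10.0))
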
Example #5
def reduce_stitch(
    reflect_list,
    direct_list,
    data_folder=None,
    prefix="PLP",
    trim_trailing=True,
    save=True,
    scale=1.0,
    reduction_options=None,
):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus/Spatz reflectometers. If there
    are multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    data_folder : str, optional
        Path to the directory containing the raw data.
    prefix : str, optional
        The instrument filename prefix.
    trim_trailing : bool, optional
        When datasets are spliced together, points in the overlap region are
        removed from the preceding dataset if this is `True`.
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    scale : float, optional
        Scales the data by this value.
    reduction_options : None, dict, or list of dict, optional
        Options passed directly to `refnx.reduce.PlatypusNexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options. If a single dict is supplied then the
        same options are used to process all datasets. A list (or sequence)
        of dicts can be used to specify different options for each dataset.
        If None, then a default set of reduction options will be used.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.

    Notes
    -----
    The `prefix` is used to map run numbers to filenames.
    For example, a run number of 10 and a prefix of `PLP` resolve to the
    NeXus filename 'PLP0000010.nx.hdf'.

    Examples
    --------

    >>> from refnx.reduce import reduce_stitch
    >>> dataset, fname = reduce_stitch([708, 709, 710],
    ...                                [711, 711, 711],
    ...                                reduction_options={"rebin_percent": 2})

    """
    options = [ReductionOptions()] * len(reflect_list)
    try:
        if reduction_options is not None:
            options = []
            for i in range(len(reflect_list)):
                if isinstance(reduction_options[i], dict):
                    options.append(reduction_options[i])
                else:
                    options.append(ReductionOptions())
    except KeyError:
        # reduction_options may be an individual dict
        if isinstance(reduction_options, dict):
            options = [reduction_options] * len(reflect_list)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list, options)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if prefix == "PLP":
        reducer_klass = PlatypusReduce
    elif prefix == "SPZ":
        reducer_klass = SpatzReduce
    else:
        raise ValueError("Incorrect prefix specified")

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0], prefix=prefix))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1], prefix=prefix))

        reducer = reducer_klass(direct_datafile)
        datasets, fnames = reducer.reduce(reflect_datafile,
                                          save=save,
                                          **val[2])

        if not index:
            datasets[0].scale(scale)

        combined_dataset.add_data(datasets[0].data,
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None

    if save:
        # this will give us <fname>.nx.hdf
        # e.g. if reflect_list[0] was 708 you'll get PLP0000708.nx.hdf
        fname = number_datafile(reflect_list[0], prefix=prefix)
        # now chop off .nx.hdf extension
        fname = basename_datafile(fname)

        fname_dat = "c_{0}.dat".format(fname)
        with open(fname_dat, "wb") as f:
            combined_dataset.save(f)
        fname_xml = "c_{0}.xml".format(fname)
        with open(fname_xml, "wb") as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname_dat
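
A hedged sketch of the per-run options described in the docstring above.
The run numbers are placeholders; the option keys are those accepted by
`refnx.reduce.PlatypusNexus.process`.

from refnx.reduce import reduce_stitch

# one options dict per reflected-beam run, e.g. a coarser rebin for the
# highest angle
opts = [{"rebin_percent": 2},
        {"rebin_percent": 2},
        {"rebin_percent": 4}]

dataset, fname = reduce_stitch([708, 709, 710],
                               [711, 711, 711],
                               reduction_options=opts)
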
Example #6
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)`, that is called with the current
            percentage progress of the reduction.
        """

        # refnx.reduce.reduce requires the current working directory to be
        # the directory that output files are written to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = './'

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't specified then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            streamed_directory = data_directory
            self.streamed_directory = streamed_directory

        logging.info('-------------------------------------------------------'
                     '\nStarting reduction run')
        logging.info(
            'data_folder={data_directory}, trim_trailing=True, '
            'lo_wavelength={low_wavelength}, '
            'hi_wavelength={high_wavelength}, '
            'rebin_percent={rebin_percent}, '
            'normalise={monitor_normalisation}, '
            'background={background_subtraction} '
            'eventmode={streamed_reduction} '
            'event_folder={streamed_directory}'.format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start,
                                  self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # is manual beam finding being used?
        peak_pos = None
        if (self.manual_beam_find and
                self.manual_beam_finder is not None):
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val['use']:
                continue

            flood = None
            if val['flood']:
                flood = val['flood']

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(['reflect-1', 'reflect-2', 'reflect-3'],
                               ['direct-1', 'direct-2', 'direct-3']):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if ((not os.path.isfile(full_path(reflect))) or
                        (not os.path.isfile(full_path(direct)))):
                    continue

                # open the reflected beam file
                ref_pn = PlatypusNexus(reflect)

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct,
                        data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                reduced = reducer(
                    ref_pn, scale=val['scale'],
                    norm_file_num=flood,
                    lo_wavelength=self.low_wavelength,
                    hi_wavelength=self.high_wavelength,
                    rebin_percent=self.rebin_percent,
                    normalise=self.monitor_normalisation,
                    background=self.background_subtraction,
                    manual_beam_find=self.manual_beam_finder,
                    peak_pos=peak_pos,
                    eventmode=eventmode,
                    event_folder=streamed_directory)

                logging.info(
                    'Reduced {} vs {}, scale={}, angle={}'.format(
                        reflect, direct, val['scale'],
                        reduced['omega'][0, 0]))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             'c_{0}.dat'.format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             'c_{0}.xml'.format(fname))

                combined_dataset.add_data(reducer.data(),
                                          requires_splice=True,
                                          trim_trailing=True)

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, 'wb') as f:
                    combined_dataset.save(f)
                with open(fname_xml, 'wb') as f:
                    combined_dataset.save_xml(f)
                logging.info(
                    'Written combined files: {} and {}'.format(
                        fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info('\nFinished reduction run\n'
                     '-------------------------------------------------------')
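
As a small worked illustration of the event-mode time slicing used in both
versions of `reducer` above (the stream_* values are made up for the example):

import numpy as np

stream_start, stream_end, stream_duration = 0, 3600, 900

# identical to the construction in reducer(): slice edges every
# stream_duration seconds, with the final edge pinned at stream_end
eventmode = np.arange(stream_start, stream_end, stream_duration)
eventmode = np.r_[eventmode, stream_end]
print(eventmode)   # [   0  900 1800 2700 3600]
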