Example No. 1
    def setUp(self):
        data = ReflectDataset()

        x1 = np.linspace(0, 10, 5)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data
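A minimal standalone sketch of the same idea (assuming only numpy and refnx are installed; the output filename is illustrative): build a ReflectDataset from (x, y, e, dx) arrays with add_data, then write it out with save, the same call used in the reducer examples further down.

import numpy as np
from refnx.dataset import ReflectDataset

# synthetic (x, y, e, dx) arrays, mirroring the setUp above
x = np.linspace(0, 10, 5)
y = 2 * x
e = np.ones_like(x)
dx = np.ones_like(x)

data = ReflectDataset()
data.add_data((x, y, e, dx))

# write the dataset to disk (illustrative filename; binary mode, as elsewhere)
with open("synthetic.dat", "wb") as f:
    data.save(f)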
Example No. 2
    def setUp(self):
        data = ReflectDataset()

        x1 = np.linspace(0, 10, 5)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data

        self.cwd = os.getcwd()
        self.tmpdir = TemporaryDirectory()
        os.chdir(self.tmpdir.name)
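A matching tearDown is not part of this excerpt; a sketch of one, following the unittest conventions used above, would restore the working directory and remove the temporary directory.

    def tearDown(self):
        # undo the chdir from setUp and delete the temporary directory
        os.chdir(self.cwd)
        self.tmpdir.cleanup()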
Example No. 3
    # autouse fixture so pytest supplies the tmpdir fixture to this setup
    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        data = ReflectDataset()

        x1 = np.linspace(0, 10, 115)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data

        self.cwd = os.getcwd()
        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
Example No. 4
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)` that is called with the current
            percentage progress of the reduction
        """

        # refnx.reduce.reduce needs you to be in the directory where you're
        # going to write files to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = "./"

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't mentioned then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            self.streamed_directory = data_directory

        logging.info("-------------------------------------------------------"
                     "\nStarting reduction run")
        logging.info(
            "data_folder={data_directory}, trim_trailing=True, "
            "lo_wavelength={low_wavelength}, "
            "hi_wavelength={high_wavelength}, "
            "rebin_percent={rebin_percent}, "
            "normalise={monitor_normalisation}, "
            "background={background_subtraction} "
            "eventmode={streamed_reduction} "
            "event_folder={streamed_directory}".format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start, self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # is manual beam finding being used?
        peak_pos = None
        if self.manual_beam_find and self.manual_beam_finder is not None:
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val["use"]:
                continue

            flood = None
            if val["flood"]:
                flood = full_path(val["flood"])

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(
                ["reflect-1", "reflect-2", "reflect-3"],
                ["direct-1", "direct-2", "direct-3"],
            ):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if (not os.path.isfile(full_path(reflect))) or (
                        not os.path.isfile(full_path(direct))):
                    continue

                # which of the nspectra to reduce (or all)
                ref_pn = PlatypusNexus(full_path(reflect))

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct, data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                try:
                    reduced = reducer(
                        ref_pn,
                        scale=val["scale"],
                        h5norm=flood,
                        lo_wavelength=self.low_wavelength,
                        hi_wavelength=self.high_wavelength,
                        rebin_percent=self.rebin_percent,
                        normalise=self.monitor_normalisation,
                        background=self.background_subtraction,
                        manual_beam_find=self.manual_beam_finder,
                        peak_pos=peak_pos,
                        eventmode=eventmode,
                        event_folder=streamed_directory,
                    )
                except Exception as e:
                    # a typical Exception would be ValueError for
                    # non-overlapping angles
                    logging.info(e)
                    continue

                logging.info("Reduced {} vs {}, scale={}, angle={}".format(
                    reflect,
                    direct,
                    val["scale"],
                    reduced[1]["omega"][0, 0],
                ))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             "c_{0}.dat".format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             "c_{0}.xml".format(fname))

                try:
                    combined_dataset.add_data(
                        reducer.data(),
                        requires_splice=True,
                        trim_trailing=True,
                    )
                except ValueError as e:
                    # datasets don't overlap
                    logging.info(e)
                    continue

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, "wb") as f:
                    combined_dataset.save(f)
                with open(fname_xml, "wb") as f:
                    combined_dataset.save_xml(f)
                logging.info("Written combined files: {} and {}".format(
                    fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info("\nFinished reduction run"
                     "-------------------------------------------------------")
Example No. 5
def reduce_stitch(reflect_list,
                  direct_list,
                  background_list=None,
                  norm_file_num=None,
                  data_folder=None,
                  prefix='PLP',
                  trim_trailing=True,
                  save=True,
                  **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    background_list : list, optional
        List of `bool` to control whether background subtraction is used
        for each reduction, e.g. `[False, True, True]`. The default is to do
        a background subtraction on all runs.
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Where is the raw data stored?
    prefix : str, optional
        The instrument filename prefix.
    trim_trailing : bool, optional
        When datasets are spliced together do you want to remove points in the
        overlap region from the preceding dataset?
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.

    Notes
    -----
    If `background` is in the supplied `kwds` it is ignored.
    The `prefix` is used to specify the run numbers to a filename.
    For example a run number of 10, and a prefix of `PLP` resolves to a
    NeXus filename of 'PLP0000010.nx.hdf'.

    Examples
    --------

    >>> from refnx.reduce import reduce_stitch
    >>> dataset, fname = reduce_stitch([708, 709, 710],
    ...                                [711, 711, 711],
    ...                                 rebin_percent=2)

    """

    scale = kwds.get('scale', 1.)

    kwds_copy = {}
    kwds_copy.update(kwds)
    kwds_copy.pop('background', None)

    if not background_list:
        background_list = [True] * len(reflect_list)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list, background_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num, prefix=prefix)
        # store in kwds_copy so h5norm is forwarded to reducer.reduce below
        kwds_copy['h5norm'] = norm_datafile

    if prefix == 'PLP':
        reducer_klass = PlatypusReduce
    else:
        raise ValueError("Incorrect prefix specified")

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0], prefix=prefix))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1], prefix=prefix))

        reducer = reducer_klass(direct_datafile)
        datasets, fnames = reducer.reduce(reflect_datafile,
                                          save=save,
                                          background=val[2],
                                          **kwds_copy)

        if not index:
            datasets[0].scale(scale)

        combined_dataset.add_data(datasets[0].data,
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None

    if save:
        # number_datafile gives us <fname>.nx.hdf, e.g. a first run number
        # of 708 resolves to PLP0000708.nx.hdf
        fname = number_datafile(reflect_list[0], prefix=prefix)
        # now chop off .nx.hdf extension
        fname = basename_datafile(fname)

        fname_dat = 'c_{0}.dat'.format(fname)
        with open(fname_dat, 'wb') as f:
            combined_dataset.save(f)
        fname_xml = 'c_{0}.xml'.format(fname)
        with open(fname_xml, 'wb') as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname_dat
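A usage sketch for this variant, following its docstring: per-run background control via background_list, an optional flood-field correction via norm_file_num, and extra keywords (here rebin_percent) forwarded to the spectrum processing. Run numbers 708-711 come from the docstring; run 712 and the data folder are illustrative assumptions.

from refnx.reduce import reduce_stitch

dataset, fname = reduce_stitch(
    [708, 709, 710],                      # reflected beam runs
    [711, 711, 711],                      # corresponding direct beam runs
    background_list=[False, True, True],  # skip background subtraction on run 708
    norm_file_num=712,                    # hypothetical water flood-field run
    data_folder="./raw_data",             # hypothetical raw data location
    rebin_percent=2,                      # passed through to spectrum processing
)
print(fname)                              # e.g. 'c_PLP0000708.dat' when save=True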
Example No. 6
def reduce_stitch(reflect_list, direct_list, norm_file_num=None,
                  data_folder=None, trim_trailing=True, save=True, **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Where is the raw data stored?
    trim_trailing : bool, optional
        When datasets are spliced together do you want to remove points in the
        overlap region from the preceding dataset?
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.
    """
    scale = kwds.get('scale', 1.)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num)
        kwds['h5norm'] = norm_datafile

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0]))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1]))

        reduced = ReducePlatypus(direct_datafile,
                                 reflect=reflect_datafile,
                                 save=save,
                                 **kwds)
        if not index:
            reduced.scale(scale)

        combined_dataset.add_data(reduced.data(),
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname = None
    if save:
        fname = 'c_PLP{0:07d}.dat'.format(reflect_list[0])
        with open(fname, 'wb') as f:
            combined_dataset.save(f)
        fname = 'c_PLP{0:07d}.xml'.format(reflect_list[0])
        with open(fname, 'wb') as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname
Example No. 7
def reduce_stitch(
    reflect_list,
    direct_list,
    data_folder=None,
    prefix="PLP",
    trim_trailing=True,
    save=True,
    scale=1.0,
    reduction_options=None,
):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus/Spatz reflectometers. If there
    are multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    data_folder : str, optional
        Where is the raw data stored?
    prefix : str, optional
        The instrument filename prefix.
    trim_trailing : bool, optional
        When datasets are spliced together do you want to remove points in the
        overlap region from the preceding dataset?
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    scale : float, optional
        Scales the data by this value.
    reduction_options : None, dict, or list of dict, optional
        Options passed directly to `refnx.reduce.PlatypusNexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options. If an individual dict then the same
        options are used to process all datasets. A list (or sequence) of
        dict can be used to specify different options for each dataset. If
        None, then a default set of reduction options will be used.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.

    Notes
    -----
    The `prefix` is used to specify the run numbers to a filename.
    For example a run number of 10, and a prefix of `PLP` resolves to a
    NeXus filename of 'PLP0000010.nx.hdf'.

    Examples
    --------

    >>> from refnx.reduce import reduce_stitch
    >>> dataset, fname = reduce_stitch([708, 709, 710],
    ...                                [711, 711, 711],
    ...                                reduction_options={"rebin_percent": 2})

    """
    options = [ReductionOptions()] * len(reflect_list)
    try:
        if reduction_options is not None:
            options = []
            for i in range(len(reflect_list)):
                if isinstance(reduction_options[i], dict):
                    options.append(reduction_options[i])
                else:
                    options.append(ReductionOptions())
    except KeyError:
        # reduction_options may be an individual dict
        if isinstance(reduction_options, dict):
            options = [reduction_options] * len(reflect_list)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list, options)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if prefix == "PLP":
        reducer_klass = PlatypusReduce
    elif prefix == "SPZ":
        reducer_klass = SpatzReduce
    else:
        raise ValueError("Incorrect prefix specified")

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0], prefix=prefix))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1], prefix=prefix))

        reducer = reducer_klass(direct_datafile)
        datasets, fnames = reducer.reduce(reflect_datafile,
                                          save=save,
                                          **val[2])

        if not index:
            datasets[0].scale(scale)

        combined_dataset.add_data(datasets[0].data,
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None

    if save:
        # number_datafile gives us <fname>.nx.hdf, e.g. a first run number
        # of 708 resolves to PLP0000708.nx.hdf
        fname = number_datafile(reflect_list[0], prefix=prefix)
        # now chop off .nx.hdf extension
        fname = basename_datafile(fname)

        fname_dat = "c_{0}.dat".format(fname)
        with open(fname_dat, "wb") as f:
            combined_dataset.save(f)
        fname_xml = "c_{0}.xml".format(fname)
        with open(fname_xml, "wb") as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname_dat
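A sketch of the per-dataset options this version supports, per its docstring: a list of dicts applies different processing options to each reflected/direct pair. Run numbers are reused from the docstring example; the option values are illustrative.

from refnx.reduce import reduce_stitch

# one options dict per reflected-beam run; any entry that is not a dict
# falls back to a default ReductionOptions
per_run_options = [
    {"rebin_percent": 2},
    {"rebin_percent": 3},
    {"rebin_percent": 4},
]

dataset, fname = reduce_stitch(
    [708, 709, 710],
    [711, 711, 711],
    prefix="PLP",                 # use "SPZ" for data from the Spatz reflectometer
    reduction_options=per_run_options,
)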
Example No. 8
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)` that is called with the current
            percentage progress of the reduction
        """

        # refnx.reduce.reduce needs you to be in the directory where you're
        # going to write files to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = './'

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't mentioned then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            self.streamed_directory = data_directory

        logging.info('-------------------------------------------------------'
                     '\nStarting reduction run')
        logging.info(
            'data_folder={data_directory}, trim_trailing=True, '
            'lo_wavelength={low_wavelength}, '
            'hi_wavelength={high_wavelength}, '
            'rebin_percent={rebin_percent}, '
            'normalise={monitor_normalisation}, '
            'background={background_subtraction} '
            'eventmode={streamed_reduction} '
            'event_folder={streamed_directory}'.format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start,
                                  self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # is manual beam finding being used?
        peak_pos = None
        if (self.manual_beam_find and
                self.manual_beam_finder is not None):
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val['use']:
                continue

            flood = None
            if val['flood']:
                flood = val['flood']

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(['reflect-1', 'reflect-2', 'reflect-3'],
                               ['direct-1', 'direct-2', 'direct-3']):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if ((not os.path.isfile(full_path(reflect))) or
                        (not os.path.isfile(full_path(direct)))):
                    continue

                # which of the nspectra to reduce (or all)
                ref_pn = PlatypusNexus(reflect)

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct,
                        data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                reduced = reducer(
                    ref_pn, scale=val['scale'],
                    norm_file_num=flood,
                    lo_wavelength=self.low_wavelength,
                    hi_wavelength=self.high_wavelength,
                    rebin_percent=self.rebin_percent,
                    normalise=self.monitor_normalisation,
                    background=self.background_subtraction,
                    manual_beam_find=self.manual_beam_finder,
                    peak_pos=peak_pos,
                    eventmode=eventmode,
                    event_folder=streamed_directory)

                logging.info(
                    'Reduced {} vs {}, scale={}, angle={}'.format(
                        reflect, direct, val['scale'],
                        reduced['omega'][0, 0]))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             'c_{0}.dat'.format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             'c_{0}.xml'.format(fname))

                combined_dataset.add_data(reducer.data(),
                                          requires_splice=True,
                                          trim_trailing=True)

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, 'wb') as f:
                    combined_dataset.save(f)
                with open(fname_xml, 'wb') as f:
                    combined_dataset.save_xml(f)
                logging.info(
                    'Written combined files: {} and {}'.format(
                        fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info('\nFinished reduction run\n'
                     '-------------------------------------------------------')