Code example #1
File: tof_simulator.py Project: igresh/refnx-models
    def __init__(
            self,
            model,
            angle,
            L12=2859,
            footprint=60,
            L2S=120,
            dtheta=3.3,  # angular resolution
            lo_wavelength=2.8,
            hi_wavelength=18,
            dlambda=3.3,
            rebin=2):
        self.model = model

        # turn off resolution smearing
        self.model.dq = 0
        self.bkg = model.bkg.value
        self.angle = angle

        # the fractional width of a square wavelength resolution
        self.dlambda = dlambda / 100.
        self.rebin = rebin / 100.
        self.wavelength_bins = calculate_wavelength_bins(
            lo_wavelength, hi_wavelength, rebin)
        # nominal Q values
        bin_centre = 0.5 * (self.wavelength_bins[1:] +
                            self.wavelength_bins[:-1])
        self.q = general.q(angle, bin_centre)

        # keep a tally of the direct and reflected beam
        self.direct_beam = np.zeros((self.wavelength_bins.size - 1))
        self.reflected_beam = np.zeros((self.wavelength_bins.size - 1))

        # wavelength generator
        a = PN('PLP0000711.nx.hdf')
        q, i, di = a.process(normalise=False,
                             normalise_bins=False,
                             rebin_percent=0,
                             lo_wavelength=lo_wavelength,
                             hi_wavelength=hi_wavelength)
        q = q.squeeze()
        i = i.squeeze()
        self.spectrum_dist = SpectrumDist(q, i)

        # angular resolution generator, based on a trapezoidal distribution
        # The slit settings are the optimised set typically used in an
        # experiment
        self.dtheta = dtheta / 100.
        self.footprint = footprint
        s1, s2 = general.slit_optimiser(footprint,
                                        self.dtheta,
                                        angle=angle,
                                        L2S=L2S,
                                        L12=L12,
                                        verbose=False)
        div, alpha, beta = general.div(s1, s2, L12=L12)
        self.angular_dist = trapz(c=(alpha - beta) / 2. / alpha,
                                  d=(alpha + beta) / 2. / alpha,
                                  loc=-alpha,
                                  scale=2 * alpha)
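A hedged usage sketch for the constructor above: only __init__ is reproduced, so the class name used here (TofSimulator) is an assumption, as is the two-layer model. SLD and ReflectModel are real refnx.reflect classes, and PLP0000711.nx.hdf has to be available because the constructor loads it as the wavelength spectrum.

from refnx.reflect import SLD, ReflectModel

# minimal air / SiO2 / Si model to drive the simulator
air = SLD(0.0, name='air')
sio2 = SLD(3.47, name='SiO2')
si = SLD(2.07, name='Si')
structure = air(0, 0) | sio2(100, 3) | si(0, 3)
model = ReflectModel(structure, bkg=1e-7)

# hypothetical class name -- the snippet above shows only its __init__
sim = TofSimulator(model, angle=0.65)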
Code example #2
    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ['PLP0000708.nx.hdf', 'PLP0000708.nx.hdf']
        pths = [os.path.join(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            # it should be processable
            fadd = PlatypusNexus(
                os.path.join(os.getcwd(), 'ADD_PLP0000708.nx.hdf'))
            fadd.process()

            # it should also be reduceable
            reducer = PlatypusReduce(
                os.path.join(self.pth, 'PLP0000711.nx.hdf'))

            datasets, reduced = reducer.reduce(
                os.path.join(os.getcwd(), 'ADD_PLP0000708.nx.hdf'))
            assert_('y' in reduced)

            # the error bars should be smaller
            datasets2, reduced2 = reducer.reduce(
                os.path.join(self.pth, 'PLP0000708.nx.hdf'))

            assert_(np.all(reduced['y_err'] < reduced2['y_err']))
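The final assertion rests on Poisson counting statistics: accumulating a run with itself doubles the counts N in every bin, and because the standard deviation of a Poisson count is sqrt(N), the relative error shrinks by a factor of sqrt(2). A standalone check of that reasoning (plain NumPy, no refnx required):

import numpy as np

# relative error of a Poisson count N is sqrt(N)/N = 1/sqrt(N),
# so doubling the counts reduces it by a factor of sqrt(2)
counts = np.array([100.0, 400.0, 2500.0])
rel_err_single = 1 / np.sqrt(counts)
rel_err_doubled = 1 / np.sqrt(2 * counts)
assert np.all(rel_err_doubled < rel_err_single)
print(rel_err_single / rel_err_doubled)  # ~1.414 for every bin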
Code example #3
    def setUp(self):
        path = os.path.dirname(__file__)
        self.path = path

        self.f113 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011613.nx.hdf'))
        self.f641 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011641.nx.hdf'))
        return 0
Code example #4
File: test_platypusnexus.py Project: refnx/refnx
    def setUp(self):
        path = os.path.dirname(__file__)
        self.path = path

        self.f113 = PlatypusNexus(os.path.join(self.path, "PLP0011613.nx.hdf"))
        self.f641 = PlatypusNexus(os.path.join(self.path, "PLP0011641.nx.hdf"))
        self.cwd = os.getcwd()
        self.tmpdir = TemporaryDirectory()
        os.chdir(self.tmpdir.name)
        return 0
Code example #5
    def setup_method(self, tmpdir):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        self.f113 = PlatypusNexus(os.path.join(self.pth, 'PLP0011613.nx.hdf'))
        self.f641 = PlatypusNexus(os.path.join(self.pth, 'PLP0011641.nx.hdf'))
        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0
Code example #6
    def setup_method(self, tmpdir, data_directory):
        self.pth = pjoin(data_directory, "reduce")

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.f113 = PlatypusNexus(pjoin(self.pth, "PLP0011613.nx.hdf"))
            self.f641 = PlatypusNexus(pjoin(self.pth, "PLP0011641.nx.hdf"))
        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0
Code example #7
    def setup_method(self, tmpdir):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            self.f113 = PlatypusNexus(
                os.path.join(self.pth, 'PLP0011613.nx.hdf'))
            self.f641 = PlatypusNexus(
                os.path.join(self.pth, 'PLP0011641.nx.hdf'))
        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0
Code example #8
File: test_event.py Project: refnx/refnx
    def test_event_same_as_detector(self):
        # the detector file should be the same as the event file
        orig_file = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011613.nx.hdf'))
        orig_det = orig_file.cat.detector
        event_det, fb = event.process_event_stream(self.event_list,
                                                   [0, 23998],
                                                   orig_file.cat.t_bins,
                                                   orig_file.cat.y_bins,
                                                   orig_file.cat.x_bins)
        assert_equal(event_det, orig_det)

        # PlatypusNexus.process_event_stream should be the same as well
        fb, det, bm = orig_file.process_event_stream(frame_bins=[])
        assert_equal(det, orig_det)
Code example #9
File: test_event.py Project: llimeht/refnx
    def test_event_same_as_detector(self):
        # the detector file should be the same as the event file
        orig_file = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011613.nx.hdf'))
        orig_det = orig_file.cat.detector
        frames = [np.arange(0, 23999)]
        event_det, fb = event.process_event_stream(self.event_list,
                                                   frames,
                                                   orig_file.cat.t_bins,
                                                   orig_file.cat.y_bins,
                                                   orig_file.cat.x_bins)
        assert_equal(event_det, orig_det)

        # PlatypusNexus.process_event_stream should be the same as well
        det, fc, bm = orig_file.process_event_stream(frame_bins=[])
        assert_equal(det, orig_det)
Code example #10
File: test_platypusnexus.py Project: refnx/refnx
    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ["PLP0000708.nx.hdf", "PLP0000708.nx.hdf"]
        pths = [os.path.join(self.path, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        # it should be processable
        fadd = PlatypusNexus(os.path.join(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
        fadd.process()

        # it should also be reduceable
        reducer = ReducePlatypus(os.path.join(self.path, "PLP0000711.nx.hdf"))
        reduced = reducer.reduce(os.path.join(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
        assert_("ydata" in reduced)

        # the error bars should be smaller
        reduced2 = reducer.reduce(os.path.join(self.path, "PLP0000708.nx.hdf"))

        assert_(np.all(reduced["ydata_sd"] < reduced2["ydata_sd"]))
Code example #11
    def test_event_same_as_detector(self):
        # the detector file should be the same as the event file
        # warnings filter for pixel size
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)

            orig_file = PlatypusNexus(
                os.path.join(self.path, 'PLP0011641.nx.hdf'))

        orig_det = orig_file.cat.detector
        frames = [np.arange(0, 501744)]
        event_det, fb = event.process_event_stream(self.event_list, frames,
                                                   orig_file.cat.t_bins,
                                                   orig_file.cat.y_bins,
                                                   orig_file.cat.x_bins)
        assert_equal(event_det, orig_det)

        # PlatypusNexus.process_event_stream should be the same as well
        det, fc, bm = orig_file.process_event_stream(frame_bins=[])
        assert_equal(det, orig_det)
Code example #12
    def test_PNR_metadata(self):
        self.f8861.process()
        self.f8862.process()
        self.f8863.process()
        self.f8864.process()

        # Check that we can read all of the flipper statuses in files

        # Flipper 1 on, flipper 2 on
        assert_almost_equal(self.f8861.cat.cat["pol_flip_current"], 5.0)
        assert_almost_equal(self.f8861.cat.cat["anal_flip_current"], 4.5)

        # Flipper 1 on, flipper 2 off
        assert_almost_equal(self.f8862.cat.cat["anal_flip_current"], 0)
        assert_almost_equal(self.f8862.cat.cat["pol_flip_current"], 5.0)

        # Flipper 1 off, flipper 2 on
        assert_almost_equal(self.f8863.cat.cat["anal_flip_current"], 4.5)
        assert_almost_equal(self.f8863.cat.cat["pol_flip_current"], 0)

        # Flipper 1 off, flipper 2 off
        assert_almost_equal(self.f8864.cat.cat["anal_flip_current"], 0)
        assert_almost_equal(self.f8864.cat.cat["pol_flip_current"], 0)

        # Check SpinChannel for each file
        assert self.f8861.spin_state == SpinChannel.UP_UP
        assert self.f8862.spin_state == SpinChannel.UP_DOWN
        assert self.f8863.spin_state == SpinChannel.DOWN_UP
        assert self.f8864.spin_state == SpinChannel.DOWN_DOWN

        # test spin channel setting
        # non spin analysed. mode is POL, not POLANAL
        pn = PlatypusNexus(pjoin(self.pth, "PLP0016427.nx.hdf"))
        assert pn.spin_state == SpinChannel.UP_UP
        pn = PlatypusNexus(pjoin(self.pth, "PLP0016426.nx.hdf"))
        assert pn.spin_state == SpinChannel.DOWN_DOWN
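For reference, the flipper-current logic this test exercises can be mirrored as a self-contained sketch. The real SpinChannel enum ships with refnx; the local Enum and the 0.5 A on/off threshold below are assumptions for illustration only.

from enum import Enum

class SpinChannel(Enum):  # local stand-in for refnx's enum
    UP_UP = (1, 1)
    UP_DOWN = (1, 0)
    DOWN_UP = (0, 1)
    DOWN_DOWN = (0, 0)

def spin_state(pol_flip_current, anal_flip_current, threshold=0.5):
    # a flipper is treated as 'on' when it draws current (assumed threshold)
    pol = int(pol_flip_current > threshold)
    anal = int(anal_flip_current > threshold)
    return SpinChannel((pol, anal))

assert spin_state(5.0, 4.5) is SpinChannel.UP_UP      # both flippers on
assert spin_state(5.0, 0.0) is SpinChannel.UP_DOWN    # analyser flipper off
assert spin_state(0.0, 4.5) is SpinChannel.DOWN_UP    # polariser flipper off
assert spin_state(0.0, 0.0) is SpinChannel.DOWN_DOWN  # both off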
Code example #13
    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ["PLP0000708.nx.hdf", "PLP0000708.nx.hdf"]
        pths = [pjoin(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            # it should be processable
            fadd = PlatypusNexus(pjoin(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
            fadd.process()

            # it should also be reduceable
            reducer = PlatypusReduce(pjoin(self.pth, "PLP0000711.nx.hdf"))

            datasets, reduced = reducer.reduce(
                pjoin(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
            assert_("y" in reduced)

            # the error bars should be smaller
            datasets2, reduced2 = reducer.reduce(
                pjoin(self.pth, "PLP0000708.nx.hdf"))

            assert_(np.all(reduced["y_err"] < reduced2["y_err"]))
Code example #14
    def test_event_same_as_detector(self, event_setup):
        # the detector file should be the same as the event file
        # warnings filter for pixel size
        pth = pjoin(event_setup.data_directory, "reduce")

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)

            orig_file = PlatypusNexus(pjoin(pth, "PLP0011641.nx.hdf"))

        orig_det = orig_file.cat.detector
        frames = [np.arange(0, 501744)]
        event_det, fb = event.process_event_stream(
            event_setup.event_list,
            frames,
            orig_file.cat.t_bins,
            orig_file.cat.y_bins,
            orig_file.cat.x_bins,
        )
        assert_equal(event_det, orig_det)

        # PlatypusNexus.process_event_stream should be the same as well
        det, fc, bm = orig_file.process_event_stream(frame_bins=[])
        assert_equal(det, orig_det)
Code example #15
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)` that is called with the current
            percentage progress of the reduction
        """

        # refnx.reduce.reduce needs you to be in the directory where you're
        # going to write files to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = "./"

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't mentioned then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            self.streamed_directory = data_directory

        logging.info("-------------------------------------------------------"
                     "\nStarting reduction run")
        logging.info(
            "data_folder={data_directory}, trim_trailing=True, "
            "lo_wavelength={low_wavelength}, "
            "hi_wavelength={high_wavelength}, "
            "rebin_percent={rebin_percent}, "
            "normalise={monitor_normalisation}, "
            "background={background_subtraction} "
            "eventmode={streamed_reduction} "
            "event_folder={streamed_directory}".format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start, self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # are you manual beamfinding?
        peak_pos = None
        if self.manual_beam_find and self.manual_beam_finder is not None:
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val["use"]:
                continue

            flood = None
            if val["flood"]:
                flood = full_path(val["flood"])

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(
                ["reflect-1", "reflect-2", "reflect-3"],
                ["direct-1", "direct-2", "direct-3"],
            ):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if (not os.path.isfile(full_path(reflect))) or (
                        not os.path.isfile(full_path(direct))):
                    continue

                # which of the nspectra to reduce (or all)
                ref_pn = PlatypusNexus(full_path(reflect))

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct, data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                try:
                    reduced = reducer(
                        ref_pn,
                        scale=val["scale"],
                        h5norm=flood,
                        lo_wavelength=self.low_wavelength,
                        hi_wavelength=self.high_wavelength,
                        rebin_percent=self.rebin_percent,
                        normalise=self.monitor_normalisation,
                        background=self.background_subtraction,
                        manual_beam_find=self.manual_beam_finder,
                        peak_pos=peak_pos,
                        eventmode=eventmode,
                        event_folder=streamed_directory,
                    )
                except Exception as e:
                    # typical Exception would be ValueError for non overlapping
                    # angles
                    logging.info(e)
                    continue

                logging.info("Reduced {} vs {}, scale={}, angle={}".format(
                    reflect,
                    direct,
                    val["scale"],
                    reduced[1]["omega"][0, 0],
                ))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             "c_{0}.dat".format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             "c_{0}.xml".format(fname))

                try:
                    combined_dataset.add_data(
                        reducer.data(),
                        requires_splice=True,
                        trim_trailing=True,
                    )
                except ValueError as e:
                    # datasets don't overlap
                    logging.info(e)
                    continue

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, "wb") as f:
                    combined_dataset.save(f)
                with open(fname_xml, "wb") as f:
                    combined_dataset.save_xml(f)
                logging.info("Written combined files: {} and {}".format(
                    fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info("\nFinished reduction run"
                     "-------------------------------------------------------")
Code example #16
File: test_platypusnexus.py Project: tjmurdoch/refnx
class TestPlatypusNexus(unittest.TestCase):

    def setUp(self):
        path = os.path.dirname(__file__)
        self.path = path

        self.f113 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011613.nx.hdf'))
        self.f641 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011641.nx.hdf'))
        self.cwd = os.getcwd()
        self.tmpdir = TemporaryDirectory()
        os.chdir(self.tmpdir.name)
        return 0

    def tearDown(self):
        os.chdir(self.cwd)
        self.tmpdir.cleanup()

    def test_chod(self):
        flight_length = self.f113.chod()
        assert_almost_equal(flight_length[0], 7141.413818359375)
        assert_almost_equal(flight_length[1], 808)

        flight_length = self.f641.chod(omega=1.8, twotheta=3.6)
        assert_almost_equal(flight_length[0], 7146.3567785516016)
        assert_almost_equal(flight_length[1], 808)

    def test_phase_angle(self):
        # TODO. Add more tests where the phase_angle isn't zero.
        phase_angle, master_opening = self.f641.phase_angle()
        assert_almost_equal(phase_angle, 0)
        assert_almost_equal(master_opening, 1.04719755)

    def test_background_subtract_line(self):
        # checked each step of the background subtraction with IGOR
        # so this test background correction should be correct.

        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        mask = np.zeros(201, bool)
        mask[30:70] = True
        mask[130:160] = True

        profile, profile_sd = plp.background_subtract_line(yvals,
                                                           yvals_sd,
                                                           mask)

        verified_data = np.load(os.path.join(self.path,
                                             'background_subtract.npy'))

        assert_almost_equal(verified_data, np.c_[profile, profile_sd])

    def test_find_specular_ridge(self):
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 1000, 0, 1))
        detector = np.repeat(yvals[:, np.newaxis], 1000, axis=1).T
        detector_sd = np.sqrt(detector)
        output = plp.find_specular_ridge(detector[np.newaxis, :],
                                         detector_sd[np.newaxis, :])
        assert_(len(output) == 5)
        assert_almost_equal(output[0][0], 100)

    def test_background_subtract(self):
        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        # now make an (N, T, Y) detector image
        n_tbins = 10
        detector = np.repeat(yvals,
                             n_tbins).reshape(xvals.size, n_tbins).T
        detector_sd = np.repeat(yvals_sd,
                                n_tbins).reshape(xvals.size, n_tbins).T
        detector = detector.reshape(1, n_tbins, xvals.size)
        detector_sd = detector_sd.reshape(1, n_tbins, xvals.size)

        mask = np.zeros((1, n_tbins, 201), bool)
        mask[:, :, 30:70] = True
        mask[:, :, 130:160] = True

        det_bkg, detSD_bkg = plp.background_subtract(detector,
                                                     detector_sd,
                                                     mask)

        # each of the (N, T) entries should have the same background subtracted
        # entries
        verified_data = np.load(os.path.join(self.path,
                                             'background_subtract.npy'))

        it = np.nditer(detector, flags=['multi_index'])
        it.remove_axis(2)
        while not it.finished:
            profile = det_bkg[it.multi_index]
            profile_sd = detSD_bkg[it.multi_index]
            assert_almost_equal(verified_data, np.c_[profile, profile_sd])
            it.iternext()

    def test_calculate_bins(self):
        bins = plp.calculate_wavelength_bins(2., 18, 2.)
        assert_almost_equal(bins[0], 1.98)
        assert_almost_equal(bins[-1], 18.18)

    def test_event(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        out = self.f113.process(eventmode=[0, 900, 1800], integrate=0)
        assert_(np.size(out[1], axis=0) == 2)

    def test_event_folder(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        self.f113.process(eventmode=[0, 900, 1800], integrate=0,
                          event_folder=self.path)

    def test_multiple_acquisitions(self):
        """
        TODO: add a dataset which has multiple spectra in it, and make sure it
        processes.
        """
        pass

    def test_reduction_runs(self):
        # just check it runs
        self.f113.process()

        # check that event mode reduction gives the same output as non-event
        # mode reduction.
        spectrum0 = self.f113.process(direct=True)
        spectrum1 = self.f113.process(direct=True, eventmode=[], integrate=0)
        assert_allclose(spectrum0[1][0], spectrum1[1][0], rtol=0.001)

        # check that the wavelength resolution is roughly right, between 7 and
        # 8%.
        res = (self.f113.processed_spectrum['m_lambda_fwhm'][0] /
               self.f113.processed_spectrum['m_lambda'][0])
        assert_array_less(res, np.ones_like(res) * 0.08)
        assert_array_less(np.ones_like(res) * 0.07, res)

    def test_save_spectrum(self):
        # test saving spectrum
        self.f113.process()

        # can save the spectra by supplying a filename
        self.f113.write_spectrum_xml('test.xml')
        self.f113.write_spectrum_dat('test.dat')

        # can save by supplying file handle:
        with open('test.xml', 'wb') as f:
            self.f113.write_spectrum_xml(f)

        # can save by supplying file handle:
        with open('test.dat', 'wb') as f:
            self.f113.write_spectrum_dat(f)

    def test_accumulate_files(self):
        fnames = ['PLP0000708.nx.hdf', 'PLP0000709.nx.hdf']
        pths = [os.path.join(self.path, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)
        f8, f9, fadd = None, None, None

        try:
            f8 = h5py.File(os.path.join(self.path,
                                        'PLP0000708.nx.hdf'), 'r')
            f9 = h5py.File(os.path.join(self.path,
                                        'PLP0000709.nx.hdf'), 'r')
            fadd = h5py.File(os.path.join(os.getcwd(),
                                          'ADD_PLP0000708.nx.hdf'), 'r')

            f8d = f8['entry1/data/hmm'][0]
            f9d = f9['entry1/data/hmm'][0]
            faddd = fadd['entry1/data/hmm'][0]
            assert_equal(faddd, f8d + f9d)
        finally:
            if f8 is not None:
                f8.close()
            if f9 is not None:
                f9.close()
            if fadd is not None:
                fadd.close()

    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ['PLP0000708.nx.hdf', 'PLP0000708.nx.hdf']
        pths = [os.path.join(self.path, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        # it should be processable
        fadd = PlatypusNexus(os.path.join(os.getcwd(),
                                          'ADD_PLP0000708.nx.hdf'))
        fadd.process()

        # it should also be reduceable
        reducer = PlatypusReduce(os.path.join(self.path,
                                              'PLP0000711.nx.hdf'))
        reduced = reducer.reduce(os.path.join(os.getcwd(),
                                              'ADD_PLP0000708.nx.hdf'))
        assert_('ydata' in reduced)

        # the error bars should be smaller
        reduced2 = reducer.reduce(os.path.join(self.path, 'PLP0000708.nx.hdf'))

        assert_(np.all(reduced['ydata_sd'] < reduced2['ydata_sd']))

    def test_manual_beam_find(self):
        # you can specify a function that finds where the specular ridge is.
        def manual_beam_find(detector, detector_sd):
            beam_centre = np.zeros(len(detector))
            beam_sd = np.zeros(len(detector))
            beam_centre += 50
            beam_sd += 5
            return beam_centre, beam_sd, np.array([40]), np.array([60]), [[]]

        # the manual beam find is only mandatory when peak_pos == -1.
        self.f113.process(manual_beam_find=manual_beam_find,
                          peak_pos=-1)
        assert_equal(self.f113.processed_spectrum['m_beampos'][0], 50)

        # manual beam finding also specifies the lower and upper pixel of the
        # foreground
        assert_equal(self.f113.processed_spectrum['lopx'][0], 40)
        assert_equal(self.f113.processed_spectrum['hipx'][0], 60)

    def test_fore_back_region(self):
        # calculation of foreground and background regions is done correctly
        centres = np.array([100., 90.])
        sd = np.array([5., 11.5])
        lopx, hipx, background_pixels = fore_back_region(centres, sd)

        assert_(len(lopx) == 2)
        assert_(len(hipx) == 2)
        assert_(len(background_pixels) == 2)
        assert_(isinstance(lopx[0], numbers.Integral))
        assert_(isinstance(hipx[0], numbers.Integral))

        calc_lower = np.floor(centres - sd * EXTENT_MULT)
        assert_equal(lopx, calc_lower)
        calc_higher = np.ceil(centres + sd * EXTENT_MULT)
        assert_equal(hipx, calc_higher)

        y1 = np.atleast_1d(
            np.round(lopx - PIXEL_OFFSET).astype('int'))
        y0 = np.atleast_1d(
            np.round(lopx - PIXEL_OFFSET - EXTENT_MULT * sd).astype('int'))

        y2 = np.atleast_1d(
            np.round(hipx + PIXEL_OFFSET).astype('int'))
        y3 = np.atleast_1d(
            np.round(hipx + PIXEL_OFFSET + EXTENT_MULT * sd).astype('int'))

        bp = np.r_[np.arange(y0[0], y1[0] + 1),
                   np.arange(y2[0], y3[0] + 1)]

        assert_equal(bp, background_pixels[0])

    def test_basename_datafile(self):
        # check that the right basename is returned
        pth = 'a/b/c.nx.hdf'
        assert_(basename_datafile(pth) == 'c')

        pth = 'c.nx.hdf'
        assert_(basename_datafile(pth) == 'c')
Code example #17
    def __init__(
        self,
        model,
        angle,
        L12=2859,
        footprint=60,
        L2S=120,
        dtheta=3.3,
        lo_wavelength=2.8,
        hi_wavelength=18,
        dlambda=3.3,
        rebin=2,
        gravity=False,
        force_gaussian=False,
        force_uniform_wavelength=False,
    ):
        self.model = model

        self.bkg = model.bkg.value
        self.angle = angle

        # dlambda refers to the FWHM of the gaussian approximation to a uniform
        # distribution. The full width of the uniform distribution is
        # dlambda/0.68.
        self.dlambda = dlambda / 100.0
        # the rebin percentage refers to the full width of the bins. You have to
        # multiply this value by 0.68 to get the equivalent contribution to the
        # resolution function.
        self.rebin = rebin / 100.0
        self.wavelength_bins = calculate_wavelength_bins(
            lo_wavelength, hi_wavelength, rebin
        )
        bin_centre = 0.5 * (
            self.wavelength_bins[1:] + self.wavelength_bins[:-1]
        )

        # angular deviation due to gravity
        # --> no correction for gravity affecting width of angular resolution
        elevations = 0
        if gravity:
            speeds = general.wavelength_velocity(bin_centre)
            # trajectories through slits for different wavelengths
            trajectories = pm.find_trajectory(L12 / 1000.0, 0, speeds)
            # elevation at sample
            elevations = pm.elevation(
                trajectories, speeds, (L12 + L2S) / 1000.0
            )

        # nominal Q values
        self.q = general.q(angle - elevations, bin_centre)

        # keep a tally of the direct and reflected beam
        self.direct_beam = np.zeros((self.wavelength_bins.size - 1))
        self.reflected_beam = np.zeros((self.wavelength_bins.size - 1))

        # beam monitor counts for normalisation
        self.bmon_direct = 0
        self.bmon_reflect = 0

        self.gravity = gravity

        # wavelength generator
        self.force_uniform_wavelength = force_uniform_wavelength
        if force_uniform_wavelength:
            self.spectrum_dist = uniform(
                loc=lo_wavelength - 1, scale=hi_wavelength - lo_wavelength + 1
            )
        else:
            a = PN("PLP0000711.nx.hdf")
            q, i, di = a.process(
                normalise=False,
                normalise_bins=False,
                rebin_percent=0.5,
                lo_wavelength=max(0, lo_wavelength - 1),
                hi_wavelength=hi_wavelength + 1,
            )
            q = q.squeeze()
            i = i.squeeze()
            self.spectrum_dist = SpectrumDist(q, i)

        self.force_gaussian = force_gaussian

        # angular resolution generator, based on a trapezoidal distribution
        # The slit settings are the optimised set typically used in an
        # experiment. dtheta/theta refers to the FWHM of a Gaussian
        # approximation to a trapezoid.

        # stores the q vectors contributing towards each datapoint
        self._res_kernel = {}
        self._min_samples = 0

        self.dtheta = dtheta / 100.0
        self.footprint = footprint
        self.angle = angle
        self.L2S = L2S
        self.L12 = L12
        s1, s2 = general.slit_optimiser(
            footprint,
            self.dtheta,
            angle=angle,
            L2S=L2S,
            L12=L12,
            verbose=False,
        )
        div, alpha, beta = general.div(s1, s2, L12=L12)
        self.div, self.s1, self.s2 = s1, s2, div

        if force_gaussian:
            self.angular_dist = norm(scale=div / 2.3548)
        else:
            self.angular_dist = trapz(
                c=(alpha - beta) / 2.0 / alpha,
                d=(alpha + beta) / 2.0 / alpha,
                loc=-alpha,
                scale=2 * alpha,
            )
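The 0.68 factor quoted in the comments above follows from matching moments: a uniform distribution of full width w has standard deviation w/sqrt(12), and a Gaussian with that standard deviation has FWHM 2*sqrt(2*ln 2) * w/sqrt(12), roughly 0.68*w (the same 2.3548 constant used in the force_gaussian branch). A quick numeric check:

import numpy as np

# FWHM of the Gaussian whose variance matches a uniform distribution
# of full width `width`: 2*sqrt(2*ln(2)) * width / sqrt(12) ~= 0.68*width
width = 1.0
sd_uniform = width / np.sqrt(12.0)
fwhm_equivalent = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sd_uniform
print(fwhm_equivalent)  # 0.6797...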
Code example #18
class TestPlatypusNexus(unittest.TestCase):

    def setUp(self):
        path = os.path.dirname(__file__)
        self.path = path

        self.f113 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011613.nx.hdf'))
        self.f641 = PlatypusNexus(os.path.join(self.path,
                                               'PLP0011641.nx.hdf'))
        return 0

    def test_chod(self):
        flight_length = self.f113.chod()
        assert_almost_equal(flight_length[0], 7141.413818359375)
        assert_almost_equal(flight_length[1], 808)

        flight_length = self.f641.chod(omega=1.8, twotheta=3.6)
        assert_almost_equal(flight_length[0], 7146.3567785516016)
        assert_almost_equal(flight_length[1], 808)

    def test_phase_angle(self):
        # TODO. Add more tests where the phase_angle isn't zero.
        phase_angle, master_opening = self.f641.phase_angle()
        assert_almost_equal(phase_angle, 0)
        assert_almost_equal(master_opening, 1.04719755)

    def test_background_subtract_line(self):
        # checked each step of the background subtraction with IGOR
        # so this test background correction should be correct.

        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        mask = np.zeros(201, bool)
        mask[30:70] = True
        mask[130:160] = True

        profile, profile_sd = plp.background_subtract_line(yvals,
                                                           yvals_sd,
                                                           mask)

        verified_data = np.load(os.path.join(self.path,
                                             'background_subtract.npy'))

        assert_almost_equal(verified_data, np.c_[profile, profile_sd])

    def test_find_specular_ridge(self):
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 1000, 0, 1))
        detector = np.repeat(yvals[:, np.newaxis], 1000, axis=1).T
        detector_sd = np.sqrt(detector)
        output = plp.find_specular_ridge(detector[np.newaxis, :],
                                         detector_sd[np.newaxis, :])
        assert_(len(output) == 2)
        assert_almost_equal(output[0][0], 100)

    def test_background_subtract(self):
        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        # now make an (N, T, Y) detector image
        n_tbins = 10
        detector = np.repeat(yvals, n_tbins).reshape(xvals.size, n_tbins).T
        detector_sd = np.repeat(yvals_sd, n_tbins).reshape(xvals.size, n_tbins).T
        detector = detector.reshape(1, n_tbins, xvals.size)
        detector_sd = detector_sd.reshape(1, n_tbins, xvals.size)

        mask = np.zeros((1, n_tbins, 201), bool)
        mask[:, :, 30:70] = True
        mask[:, :, 130:160] = True

        det_bkg, detSD_bkg = plp.background_subtract(detector,
                                                     detector_sd,
                                                     mask)

        # each of the (N, T) entries should have the same background subtracted
        # entries
        verified_data = np.load(os.path.join(self.path,
                                             'background_subtract.npy'))

        it = np.nditer(detector, flags=['multi_index'])
        it.remove_axis(2)
        while not it.finished:
            profile = det_bkg[it.multi_index]
            profile_sd = detSD_bkg[it.multi_index]
            assert_almost_equal(verified_data, np.c_[profile, profile_sd])
            it.iternext()

    def test_calculate_bins(self):
        bins = plp.calculate_wavelength_bins(2., 18, 2.)
        assert_almost_equal(bins[0], 1.98)
        assert_almost_equal(bins[-1], 18.18)

    def test_event(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        out = self.f113.process(eventmode=[0, 900, 1800], integrate=0)
        assert_(np.size(out[1], axis=0) == 2)

    def test_event_folder(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        out = self.f113.process(eventmode=[0, 900, 1800], integrate=0,
                                event_folder=self.path)

    def test_multiple_acquisitions(self):
        """
        TODO: add a dataset which has multiple spectra in it, and make sure it
        processes.
        """
        pass

    def test_reduction_runs(self):
        # just check it runs
        self.f113.process()

        # check that event mode reduction gives the same output as non-event
        # mode reduction.
        spectrum0 = self.f113.process(direct=True)
        spectrum1 = self.f113.process(direct=True, eventmode=[], integrate=0)
        assert_allclose(spectrum0[1][0], spectrum1[1][0], rtol=0.001)

        # check that the wavelength resolution is roughly right, between 7 and
        # 8%.
        res = (self.f113.processed_spectrum['m_lambda_fwhm'][0] /
               self.f113.processed_spectrum['m_lambda'][0])
        assert_array_less(res, np.ones_like(res) * 0.08)
        assert_array_less(np.ones_like(res) * 0.07, res)

    def test_save_spectrum(self):
        # test saving spectrum
        self.f113.process()

        # can save the spectra by supplying a filename
        self.f113.write_spectrum_xml('test.xml')
        self.f113.write_spectrum_dat('test.dat')

        # can save by supplying file handle:
        with open('test.xml', 'wb') as f:
            self.f113.write_spectrum_xml(f)

        # can save by supplying file handle:
        with open('test.dat', 'wb') as f:
            self.f113.write_spectrum_dat(f)

    def test_accumulate_files(self):
        fnames = ['PLP0000708.nx.hdf', 'PLP0000709.nx.hdf']
        pths = [os.path.join(self.path, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)
        f8, f9, fadd = None, None, None

        try:
            f8 = h5py.File(os.path.join(self.path,
                                        'PLP0000708.nx.hdf'), 'r')
            f9 = h5py.File(os.path.join(self.path,
                                        'PLP0000709.nx.hdf'), 'r')
            fadd = h5py.File(os.path.join(os.getcwd(),
                                          'ADD_PLP0000708.nx.hdf'), 'r')

            f8d = f8['entry1/data/hmm'][0]
            f9d = f9['entry1/data/hmm'][0]
            faddd = fadd['entry1/data/hmm'][0]
            assert_equal(faddd, f8d + f9d)
        finally:
            if f8 is not None:
                f8.close()
            if f9 is not None:
                f9.close()
            if fadd is not None:
                fadd.close()

    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ['PLP0000708.nx.hdf', 'PLP0000708.nx.hdf']
        pths = [os.path.join(self.path, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        # it should be processable
        fadd = PlatypusNexus(os.path.join(os.getcwd(),
                                          'ADD_PLP0000708.nx.hdf'))
        fadd.process()

        # it should also be reduceable
        reducer = ReducePlatypus(os.path.join(self.path,
                                              'PLP0000711.nx.hdf'))
        reduced = reducer.reduce(
            os.path.join(os.getcwd(), 'ADD_PLP0000708.nx.hdf'))
        assert_('ydata' in reduced)

        # the error bars should be smaller
        reduced2 = reducer.reduce(os.path.join(self.path, 'PLP0000708.nx.hdf'))

        assert_(np.all(reduced['ydata_sd'] < reduced2['ydata_sd']))
Code example #19
class TestPlatypusNexus(object):
    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        self.f113 = PlatypusNexus(os.path.join(self.pth, 'PLP0011613.nx.hdf'))
        self.f641 = PlatypusNexus(os.path.join(self.pth, 'PLP0011641.nx.hdf'))
        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0

    def teardown_method(self):
        os.chdir(self.cwd)

    def test_chod(self):
        flight_length = self.f113.chod()
        assert_almost_equal(flight_length[0], 7141.413818359375)
        assert_almost_equal(flight_length[1], 808)

        flight_length = self.f641.chod(omega=1.8, twotheta=3.6)
        assert_almost_equal(flight_length[0], 7146.3567785516016)
        assert_almost_equal(flight_length[1], 808)

    def test_phase_angle(self):
        # TODO. Add more tests where the phase_angle isn't zero.
        phase_angle, master_opening = self.f641.phase_angle()
        assert_almost_equal(phase_angle, 0)
        assert_almost_equal(master_opening, 1.04719755)

    def test_background_subtract_line(self):
        # checked each step of the background subtraction with IGOR
        # so this test background correction should be correct.

        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        mask = np.zeros(201, bool)
        mask[30:70] = True
        mask[130:160] = True

        profile, profile_sd = plp.background_subtract_line(
            yvals, yvals_sd, mask)

        verified_data = np.load(
            os.path.join(self.pth, 'background_subtract.npy'))

        assert_almost_equal(verified_data, np.c_[profile, profile_sd])

    def test_find_specular_ridge(self):
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 1000, 0, 1))
        detector = np.repeat(yvals[:, np.newaxis], 1000, axis=1).T
        detector_sd = np.sqrt(detector)
        output = plp.find_specular_ridge(detector[np.newaxis, :],
                                         detector_sd[np.newaxis, :])
        assert_(len(output) == 5)
        assert_almost_equal(output[0][0], 100)

    def test_background_subtract(self):
        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        # now make an (N, T, Y) detector image
        n_tbins = 10
        detector = np.repeat(yvals, n_tbins).reshape(xvals.size, n_tbins).T
        detector_sd = np.repeat(yvals_sd,
                                n_tbins).reshape(xvals.size, n_tbins).T
        detector = detector.reshape(1, n_tbins, xvals.size)
        detector_sd = detector_sd.reshape(1, n_tbins, xvals.size)

        mask = np.zeros((1, n_tbins, 201), bool)
        mask[:, :, 30:70] = True
        mask[:, :, 130:160] = True

        det_bkg, detSD_bkg = plp.background_subtract(detector, detector_sd,
                                                     mask)

        # each of the (N, T) entries should have the same background subtracted
        # entries
        verified_data = np.load(
            os.path.join(self.pth, 'background_subtract.npy'))

        it = np.nditer(detector, flags=['multi_index'])
        it.remove_axis(2)
        while not it.finished:
            profile = det_bkg[it.multi_index]
            profile_sd = detSD_bkg[it.multi_index]
            assert_almost_equal(verified_data, np.c_[profile, profile_sd])
            it.iternext()

    def test_calculate_bins(self):
        bins = plp.calculate_wavelength_bins(2., 18, 2.)
        assert_almost_equal(bins[0], 1.98)
        assert_almost_equal(bins[-1], 18.18)

    def test_event(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        out = self.f113.process(eventmode=[0, 900, 1800], integrate=0)
        assert_(np.size(out[1], axis=0) == 2)

    def test_event_folder(self):
        # When you use event mode processing, make sure the right amount of
        # spectra are created
        self.f113.process(eventmode=[0, 900, 1800],
                          integrate=0,
                          event_folder=self.pth)

    def test_multiple_acquisitions(self):
        """
        TODO: add a dataset which has multiple spectra in it, and make sure it
        processes.
        """
        pass

    def test_reduction_runs(self):
        # just check it runs
        self.f113.process()

        # check that event mode reduction gives the same output as non-event
        # mode reduction.
        spectrum0 = self.f113.process(direct=True)
        spectrum1 = self.f113.process(direct=True, eventmode=[], integrate=0)
        assert_allclose(spectrum0[1][0], spectrum1[1][0], rtol=0.001)

        # check that the wavelength resolution is roughly right, between 7 and
        # 8%.
        res = (self.f113.processed_spectrum['m_lambda_fwhm'][0] /
               self.f113.processed_spectrum['m_lambda'][0])
        assert_array_less(res, np.ones_like(res) * 0.08)
        assert_array_less(np.ones_like(res) * 0.07, res)

    def test_save_spectrum(self):
        # test saving spectrum
        self.f113.process()

        # can save the spectra by supplying a filename
        self.f113.write_spectrum_xml(os.path.join(self.tmpdir, 'test.xml'))
        self.f113.write_spectrum_dat(os.path.join(self.tmpdir, 'test.dat'))

        # can save by supplying file handle:
        with open(os.path.join(self.tmpdir, 'test.xml'), 'wb') as f:
            self.f113.write_spectrum_xml(f)

    def test_accumulate_files(self):
        fnames = ['PLP0000708.nx.hdf', 'PLP0000709.nx.hdf']
        pths = [os.path.join(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)
        f8, f9, fadd = None, None, None

        try:
            f8 = h5py.File(os.path.join(self.pth, 'PLP0000708.nx.hdf'), 'r')
            f9 = h5py.File(os.path.join(self.pth, 'PLP0000709.nx.hdf'), 'r')
            fadd = h5py.File(
                os.path.join(self.tmpdir, 'ADD_PLP0000708.nx.hdf'), 'r')

            f8d = f8['entry1/data/hmm'][0]
            f9d = f9['entry1/data/hmm'][0]
            faddd = fadd['entry1/data/hmm'][0]
            assert_equal(faddd, f8d + f9d)
        finally:
            if f8 is not None:
                f8.close()
            if f9 is not None:
                f9.close()
            if fadd is not None:
                fadd.close()

    def test_accumulate_files_reduce(self):
        # test by adding a file to itself. Should have smaller stats
        fnames = ['PLP0000708.nx.hdf', 'PLP0000708.nx.hdf']
        pths = [os.path.join(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        # it should be processable
        fadd = PlatypusNexus(os.path.join(os.getcwd(),
                                          'ADD_PLP0000708.nx.hdf'))
        fadd.process()

        # it should also be reduceable
        reducer = PlatypusReduce(os.path.join(self.pth, 'PLP0000711.nx.hdf'))
        datasets, reduced = reducer.reduce(
            os.path.join(os.getcwd(), 'ADD_PLP0000708.nx.hdf'))
        assert_('y' in reduced)

        # the error bars should be smaller
        datasets2, reduced2 = reducer.reduce(
            os.path.join(self.pth, 'PLP0000708.nx.hdf'))

        assert_(np.all(reduced['y_err'] < reduced2['y_err']))

    def test_manual_beam_find(self):
        # you can specify a function that finds where the specular ridge is.
        def manual_beam_find(detector, detector_sd):
            beam_centre = np.zeros(len(detector))
            beam_sd = np.zeros(len(detector))
            beam_centre += 50
            beam_sd += 5
            return beam_centre, beam_sd, np.array([40]), np.array([60]), [[]]

        # the manual beam find is only mandatory when peak_pos == -1.
        # the beam_sd is much larger than the beam divergence, so a warning
        # should be raised.
        with pytest.warns(UserWarning):
            self.f113.process(manual_beam_find=manual_beam_find, peak_pos=-1)
        assert_equal(self.f113.processed_spectrum['m_beampos'][0], 50)

        # manual beam finding also specifies the lower and upper pixel of the
        # foreground
        assert_equal(self.f113.processed_spectrum['lopx'][0], 40)
        assert_equal(self.f113.processed_spectrum['hipx'][0], 60)

    def test_fore_back_region(self):
        # calculation of foreground and background regions is done correctly
        centres = np.array([100., 90.])
        sd = np.array([5., 11.5])
        lopx, hipx, background_pixels = fore_back_region(centres, sd)

        assert_(len(lopx) == 2)
        assert_(len(hipx) == 2)
        assert_(len(background_pixels) == 2)
        assert_(isinstance(lopx[0], numbers.Integral))
        assert_(isinstance(hipx[0], numbers.Integral))

        calc_lower = np.floor(centres - sd * EXTENT_MULT)
        assert_equal(lopx, calc_lower)
        calc_higher = np.ceil(centres + sd * EXTENT_MULT)
        assert_equal(hipx, calc_higher)

        y1 = np.atleast_1d(np.round(lopx - PIXEL_OFFSET).astype('int'))
        y0 = np.atleast_1d(
            np.round(lopx - PIXEL_OFFSET - EXTENT_MULT * sd).astype('int'))

        y2 = np.atleast_1d(np.round(hipx + PIXEL_OFFSET).astype('int'))
        y3 = np.atleast_1d(
            np.round(hipx + PIXEL_OFFSET + EXTENT_MULT * sd).astype('int'))

        bp = np.r_[np.arange(y0[0], y1[0] + 1), np.arange(y2[0], y3[0] + 1)]

        assert_equal(bp, background_pixels[0])

    def test_basename_datafile(self):
        # check that the right basename is returned
        pth = 'a/b/c.nx.hdf'
        assert_(basename_datafile(pth) == 'c')

        pth = 'c.nx.hdf'
        assert_(basename_datafile(pth) == 'c')

    def test_floodfield_correction(self):
        # check that flood field calculation works
        # the values were worked out by hand on a randomly
        # generated array
        test_norm = np.array(
            [1.12290503, 1.23743017, 0.8603352, 0.70111732, 1.07821229])
        test_norm_sd = np.array(
            [0.05600541, 0.05879208, 0.04902215, 0.04425413, 0.05487956])

        fname = os.path.join(self.pth, 'flood.h5')
        with h5py.File(fname, 'r') as f:
            norm, norm_sd = create_detector_norm(f, 3.5, -3.5)

            assert_almost_equal(norm, test_norm, 6)
            assert_almost_equal(norm_sd, test_norm_sd, 6)

            norm, norm_sd = create_detector_norm(f, -3.5, 3.5)
            assert_almost_equal(norm, test_norm, 6)
            assert_almost_equal(norm_sd, test_norm_sd, 6)
Code example #20
    def setup_method(self, tmpdir, data_directory):
        self.pth = pjoin(data_directory, "reduce")

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.f113 = PlatypusNexus(pjoin(self.pth, "PLP0011613.nx.hdf"))
            self.f641 = PlatypusNexus(pjoin(self.pth, "PLP0011641.nx.hdf"))

            # These PNR datasets all have different flipper settings
            # which should be read accurately.
            self.f8861 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008861.nx.hdf"))
            self.f8862 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008862.nx.hdf"))
            self.f8863 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008863.nx.hdf"))
            self.f8864 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008864.nx.hdf"))

        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0
Code example #21
class TestPlatypusNexus(object):
    @pytest.mark.usefixtures("no_data_directory")
    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir, data_directory):
        self.pth = pjoin(data_directory, "reduce")

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.f113 = PlatypusNexus(pjoin(self.pth, "PLP0011613.nx.hdf"))
            self.f641 = PlatypusNexus(pjoin(self.pth, "PLP0011641.nx.hdf"))

            # These PNR datasets all have different flipper settings
            self.f8861 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008861.nx.hdf"))
            self.f8862 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008862.nx.hdf"))
            self.f8863 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008863.nx.hdf"))
            self.f8864 = PlatypusNexus(
                pjoin(self.pth, "PNR_files/PLP0008864.nx.hdf"))

        self.cwd = os.getcwd()

        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
        return 0

    def teardown_method(self):
        os.chdir(self.cwd)

    def test_chod(self):
        flight_length = self.f113.chod()
        assert_almost_equal(flight_length[0], 7141.413818359375)
        assert_almost_equal(flight_length[1], 808)

        flight_length = self.f641.chod(omega=1.8, twotheta=3.6)
        assert_almost_equal(flight_length[0], 7146.3567785516016)
        assert_almost_equal(flight_length[1], 808)

    def test_phase_angle(self):
        # TODO. Add more tests where the phase_angle isn't zero.
        phase_angle, master_opening = self.f641.phase_angle()
        assert_almost_equal(phase_angle, 0)
        assert_almost_equal(master_opening, 60)

        assert self.f641.cat.t_offset is None

    def test_background_subtract_line(self):
        # checked each step of the background subtraction with IGOR
        # so this test background correction should be correct.

        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        mask = np.zeros(201, bool)
        mask[30:70] = True
        mask[130:160] = True

        profile, profile_sd = plp.background_subtract_line(
            yvals, yvals_sd, mask)

        verified_data = np.load(pjoin(self.pth, "background_subtract.npy"))

        assert_almost_equal(verified_data, np.c_[profile, profile_sd])

    def test_find_specular_ridge(self):
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 1000, 0, 1))
        detector = np.repeat(yvals[:, np.newaxis], 1000, axis=1).T
        detector_sd = np.sqrt(detector)
        output = plp.find_specular_ridge(detector[np.newaxis, :],
                                         detector_sd[np.newaxis, :])
        assert_(len(output) == 5)
        assert_almost_equal(output[0][0], 100)

    def test_pickle(self):
        # can we pickle a PlatypusNexus object?
        self.f8864.process()
        pkl = pickle.dumps(self.f8864)
        o = pickle.loads(pkl)
        ops, f8864 = o.processed_spectrum, self.f8864.processed_spectrum
        assert_allclose(ops["m_spec"], f8864["m_spec"])

    def test_background_subtract(self):
        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        # now make an (N, T, Y) detector image
        n_tbins = 10
        detector = np.repeat(yvals, n_tbins).reshape(xvals.size, n_tbins).T
        detector_sd = (np.repeat(yvals_sd,
                                 n_tbins).reshape(xvals.size, n_tbins).T)
        detector = detector.reshape(1, n_tbins, xvals.size)
        detector_sd = detector_sd.reshape(1, n_tbins, xvals.size)

        mask = np.zeros((1, n_tbins, 201), bool)
        mask[:, :, 30:70] = True
        mask[:, :, 130:160] = True

        det_bkg, detSD_bkg = plp.background_subtract(detector, detector_sd,
                                                     mask)

        # each of the (N, T) entries should have the same background-subtracted
        # profile
        verified_data = np.load(pjoin(self.pth, "background_subtract.npy"))

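        # iterate over the leading (N, T) axes only; remove_axis(2) drops the
        # pixel axis from the multi-index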
        it = np.nditer(detector, flags=["multi_index"])
        it.remove_axis(2)
        while not it.finished:
            profile = det_bkg[it.multi_index]
            profile_sd = detSD_bkg[it.multi_index]
            assert_almost_equal(verified_data, np.c_[profile, profile_sd])
            it.iternext()

    def test_calculate_bins(self):
        bins = plp.calculate_wavelength_bins(2.0, 18, 2.0)
        assert_almost_equal(bins[0], 1.98)
        assert_almost_equal(bins[-1], 18.18)
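
    # A minimal sketch, assuming calculate_wavelength_bins builds bins of
    # constant fractional width: the first edge sits at lo * (1 - rebin/200)
    # and the last at hi * (1 + rebin/200), which is why 2.0 -> 1.98 and
    # 18 -> 18.18 above. This helper is hypothetical and illustrative only,
    # not refnx's actual implementation.
    @staticmethod
    def _fractional_bins_sketch(lo, hi, rebin_percent):
        frac = rebin_percent / 100.0
        lo_edge = lo * (1 - frac / 2.0)
        hi_edge = hi * (1 + frac / 2.0)
        # geometric spacing gives every bin (roughly) the same dlambda/lambda
        n = int(np.ceil(np.log(hi_edge / lo_edge) / np.log(1 + frac)))
        return np.geomspace(lo_edge, hi_edge, n + 1)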

    def test_event(self):
        # When you use event-mode processing, make sure the right number of
        # spectra is created
        out = self.f641.process(eventmode=[0, 900, 1800], integrate=0)
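        # the three edges [0, 900, 1800] define two time slices
        # (0-900 s and 900-1800 s), so two spectra should result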
        assert_(np.size(out[1], axis=0) == 2)

    def test_event_folder(self):
        self.f641.process(eventmode=[0, 900, 1800],
                          integrate=0,
                          event_folder=self.pth)

    def test_multiple_acquisitions(self):
        """
        TODO: add a dataset which has multiple spectra in it, and make sure it
        processes.
        """
        pass

    def test_reduction_runs(self):
        # just check it runs
        self.f641.process()

        # check that event mode reduction gives the same output as non-event
        # mode reduction.
        spectrum0 = self.f641.process(direct=False)
        assert "reduction_options" in self.f641.processed_spectrum

        spectrum1 = self.f641.process(direct=False, eventmode=[], integrate=0)
        assert_allclose(spectrum0[1][0], spectrum1[1][0], rtol=0.001)

        # check that the wavelength resolution is roughly right, between 7 and
        # 8%.
        res = (self.f641.processed_spectrum["m_lambda_fwhm"][0] /
               self.f641.processed_spectrum["m_lambda"][0])
        assert_array_less(res, np.ones_like(res) * 0.08)
        assert_array_less(np.ones_like(res) * 0.07, res)

    def test_save_spectrum(self):
        # test saving spectrum
        self.f113.process()

        # can save the spectra by supplying a filename
        self.f113.write_spectrum_xml(pjoin(self.tmpdir, "test.xml"))
        self.f113.write_spectrum_dat(pjoin(self.tmpdir, "test.dat"))

        # can save by supplying a file handle:
        with open(pjoin(self.tmpdir, "test.xml"), "wb") as f:
            self.f113.write_spectrum_xml(f)

    def test_accumulate_files(self):
        fnames = ["PLP0000708.nx.hdf", "PLP0000709.nx.hdf"]
        pths = [pjoin(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)
        f8, f9, fadd = None, None, None

        try:
            f8 = h5py.File(pjoin(self.pth, "PLP0000708.nx.hdf"), "r")
            f9 = h5py.File(pjoin(self.pth, "PLP0000709.nx.hdf"), "r")
            fadd = h5py.File(pjoin(self.tmpdir, "ADD_PLP0000708.nx.hdf"), "r")

            f8d = f8["entry1/data/hmm"][0]
            f9d = f9["entry1/data/hmm"][0]
            faddd = fadd["entry1/data/hmm"][0]
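            # the accumulated detector image should be the element-wise sum
            # of the two individual runs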
            assert_equal(faddd, f8d + f9d)
        finally:
            if f8 is not None:
                f8.close()
            if f9 is not None:
                f9.close()
            if fadd is not None:
                fadd.close()

    def test_accumulate_files_reduce(self):
        # test by adding a file to itself; the accumulated file should have
        # smaller statistical uncertainty
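        # adding a run to itself doubles the counts, so the relative (Poisson)
        # uncertainty should shrink by a factor of ~1/sqrt(2)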
        fnames = ["PLP0000708.nx.hdf", "PLP0000708.nx.hdf"]
        pths = [pjoin(self.pth, fname) for fname in fnames]
        plp.accumulate_HDF_files(pths)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            # it should be processable
            fadd = PlatypusNexus(pjoin(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
            fadd.process()

            # it should also be reducible
            reducer = PlatypusReduce(pjoin(self.pth, "PLP0000711.nx.hdf"))

            datasets, reduced = reducer.reduce(
                pjoin(os.getcwd(), "ADD_PLP0000708.nx.hdf"))
            assert_("y" in reduced)

            # the error bars should be smaller
            datasets2, reduced2 = reducer.reduce(
                pjoin(self.pth, "PLP0000708.nx.hdf"))

            assert_(np.all(reduced["y_err"] < reduced2["y_err"]))

    def test_manual_beam_find(self):
        # you can specify a function that finds where the specular ridge is.
        def manual_beam_find(detector, detector_sd, name):
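            # a hypothetical finder: report a fixed beam centre (pixel 50)
            # with an sd of 5 for every spectrum, plus lo/hi foreground
            # pixels of 40 and 60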
            beam_centre = np.zeros(len(detector))
            beam_sd = np.zeros(len(detector))
            beam_centre += 50
            beam_sd += 5
            return beam_centre, beam_sd, np.array([40]), np.array([60]), [[]]

        # a manual beam-finding function is only mandatory when peak_pos == -1.
        # the beam_sd is much larger than the beam divergence, so a warning
        # should be raised.
        with pytest.warns(UserWarning):
            self.f113.process(manual_beam_find=manual_beam_find, peak_pos=-1)
        assert_equal(self.f113.processed_spectrum["m_beampos"][0], 50)

        # manual beam finding also specifies the lower and upper pixel of the
        # foreground
        assert_equal(self.f113.processed_spectrum["lopx"][0], 40)
        assert_equal(self.f113.processed_spectrum["hipx"][0], 60)

    def test_fore_back_region(self):
        # calculation of foreground and background regions is done correctly
        centres = np.array([100.0, 90.0])
        sd = np.array([5.0, 11.5])
        lopx, hipx, background_pixels = fore_back_region(centres, sd)

        assert_(len(lopx) == 2)
        assert_(len(hipx) == 2)
        assert_(len(background_pixels) == 2)
        assert_(isinstance(lopx[0], numbers.Integral))
        assert_(isinstance(hipx[0], numbers.Integral))

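        # the foreground should span EXTENT_MULT standard deviations either
        # side of the beam centre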
        calc_lower = np.floor(centres - sd * EXTENT_MULT)
        assert_equal(lopx, calc_lower)
        calc_higher = np.ceil(centres + sd * EXTENT_MULT)
        assert_equal(hipx, calc_higher)

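        # background bands of width EXTENT_MULT * sd sit PIXEL_OFFSET pixels
        # beyond the foreground on each side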
        y1 = np.atleast_1d(np.round(lopx - PIXEL_OFFSET).astype("int"))
        y0 = np.atleast_1d(
            np.round(lopx - PIXEL_OFFSET - EXTENT_MULT * sd).astype("int"))

        y2 = np.atleast_1d(np.round(hipx + PIXEL_OFFSET).astype("int"))
        y3 = np.atleast_1d(
            np.round(hipx + PIXEL_OFFSET + EXTENT_MULT * sd).astype("int"))

        bp = np.r_[np.arange(y0[0], y1[0] + 1), np.arange(y2[0], y3[0] + 1)]

        assert_equal(bp, background_pixels[0])

    def test_basename_datafile(self):
        # check that the right basename is returned
        pth = "a/b/c.nx.hdf"
        assert_(basename_datafile(pth) == "c")

        pth = "c.nx.hdf"
        assert_(basename_datafile(pth) == "c")

    def test_floodfield_correction(self):
        # check that the flood-field calculation works. The expected values
        # were worked out by hand on a randomly generated array.
        test_norm = np.array(
            [1.12290503, 1.23743017, 0.8603352, 0.70111732, 1.07821229])
        test_norm_sd = np.array(
            [0.05600541, 0.05879208, 0.04902215, 0.04425413, 0.05487956])

        fname = pjoin(self.pth, "flood.h5")
        with h5py.File(fname, "r") as f:
            norm, norm_sd = create_detector_norm(f, 3.5, -3.5, axis=3)

            assert_almost_equal(norm, test_norm, 6)
            assert_almost_equal(norm_sd, test_norm_sd, 6)

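            # swapping the integration limits should give the same
            # normalisation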
            norm, norm_sd = create_detector_norm(f, -3.5, 3.5, axis=3)
            assert_almost_equal(norm, test_norm, 6)
            assert_almost_equal(norm_sd, test_norm_sd, 6)

    def test_PNR_metadata(self):
        self.f8861.process()
        self.f8862.process()
        self.f8863.process()
        self.f8864.process()

        # Check that we can read all of the flipper statuses in the files

        # Flipper 1 on, flipper 2 on
        assert_almost_equal(self.f8861.cat.cat["pol_flip_current"], 5.0)
        assert_almost_equal(self.f8861.cat.cat["anal_flip_current"], 4.5)

        # Flipper 1 on, flipper 2 off
        assert_almost_equal(self.f8862.cat.cat["anal_flip_current"], 0)
        assert_almost_equal(self.f8862.cat.cat["pol_flip_current"], 5.0)

        # Flipper 1 off, flipper 2 on
        assert_almost_equal(self.f8863.cat.cat["anal_flip_current"], 4.5)
        assert_almost_equal(self.f8863.cat.cat["pol_flip_current"], 0)

        # Flipper 1 off, flipper 2 off
        assert_almost_equal(self.f8864.cat.cat["anal_flip_current"], 0)
        assert_almost_equal(self.f8864.cat.cat["pol_flip_current"], 0)

        # Check SpinChannel for each file
        assert self.f8861.spin_state == SpinChannel.UP_UP
        assert self.f8862.spin_state == SpinChannel.UP_DOWN
        assert self.f8863.spin_state == SpinChannel.DOWN_UP
        assert self.f8864.spin_state == SpinChannel.DOWN_DOWN

        # test spin channel setting for a non-spin-analysed measurement
        # (the mode is POL, not POLANAL)
        pn = PlatypusNexus(pjoin(self.pth, "PLP0016427.nx.hdf"))
        assert pn.spin_state == SpinChannel.UP_UP
        pn = PlatypusNexus(pjoin(self.pth, "PLP0016426.nx.hdf"))
        assert pn.spin_state == SpinChannel.DOWN_DOWN
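
    # A hedged sketch of how the flipper currents above could map onto a
    # SpinChannel; _spin_state_sketch is a hypothetical helper for
    # illustration, not the logic PlatypusNexus actually uses. A nonzero
    # flipper current is read as that flipper being ON.
    @staticmethod
    def _spin_state_sketch(pol_flip_current, anal_flip_current):
        # the polariser flipper sets the first spin label, the analyser
        # flipper sets the second
        pol = "UP" if pol_flip_current > 0 else "DOWN"
        anal = "UP" if anal_flip_current > 0 else "DOWN"
        return getattr(SpinChannel, "{}_{}".format(pol, anal))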

    def test_PNR_magnet_read(self):
        self.f8861.process()
        # Check magnetic field sensors
        assert_almost_equal(self.f8861.cat.cat["magnet_current_set"], 0)
        assert_almost_equal(self.f8861.cat.cat["magnet_output_current"], 0.001)