Code example #1
    def setup(self):
        pth = os.path.dirname(os.path.abspath(refnx.reflect.__file__))
        e361 = RD(os.path.join(pth, 'test', 'e361r.txt'))

        sio2 = SLD(3.47, name='SiO2')
        si = SLD(2.07, name='Si')
        d2o = SLD(6.36, name='D2O')
        polymer = SLD(1, name='polymer')

        # e361 is an older dataset, but well characterised
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        model361 = ReflectModel(structure361, bkg=2e-5)

        model361.scale.vary = True
        model361.bkg.vary = True
        model361.scale.range(0.1, 2)
        model361.bkg.range(0, 5e-5)
        model361.dq = 5.

        # d2o
        structure361[-1].sld.real.vary = True
        structure361[-1].sld.real.range(6, 6.36)

        structure361[1].thick.vary = True
        structure361[1].thick.range(5, 20)
        structure361[2].thick.vary = True
        structure361[2].thick.range(100, 220)

        structure361[2].sld.real.vary = True
        structure361[2].sld.real.range(0.2, 1.5)

        e361.x_err = None
        objective = Objective(model361, e361)
        self.fitter = CurveFitter(objective, nwalkers=200)
        self.fitter.initialise('jitter')
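A minimal sketch of how the fitter prepared above might then be run; the step count is illustrative and is not part of the original benchmark:

    # Sketch only: `self.fitter` is the CurveFitter built in setup() above.
    self.fitter.sample(400)        # draw 400 MCMC steps per walker
    print(self.fitter.objective)   # report the sampled parameter values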
Code example #2
File: quickplot.py, Project: llimeht/refnx
def ref_plot(datasets):
    """
    Quickly plot a lot of datasets

    Parameters
    ----------
    datasets : iterable
        strings or files identifying the datasets to plot

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure. Use fig.show() to display
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    for dataset in datasets:
        d = ReflectDataset()
        d.load(dataset)
        ax.plot(d.x, d.y)

    ax.autoscale(tight=True)
    ax.set_yscale('log')
    ax.set_xlabel(u"Q /\u212B **-1")
    ax.set_ylabel('reflectivity')
    return fig
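Hypothetical usage of `ref_plot`; the filenames are placeholders rather than files shipped with refnx:

    fig = ref_plot(['c_PLP0000708.dat', 'c_PLP0011859_q.txt'])
    fig.show()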
Code example #3
    def snapshot(self, snapshot_name):
        original = self.datastore["theoretical"]
        dataset = ReflectDataset()
        dataset.data = (
            original.dataset.x,
            original.model.model(original.dataset.x, x_err=dataset.x_err),
        )
        dataset.name = snapshot_name

        new_model = deepcopy(original.model)
        new_model.name = snapshot_name

        # if the snapshot already exists then overwrite it.
        if snapshot_name in self.datastore.names:
            row = self.data_object_row(snapshot_name)
            self._rootnode.child(row).set_dataset(dataset)
            self._rootnode.child(row).set_reflect_model(new_model)
            data_object = self.data_object_node(snapshot_name).data_object
        else:
            # otherwise you have to add it.
            data_object = DataObject(dataset)
            data_object.model = new_model
            self._rootnode.set_data_object(data_object)

        return data_object
Code example #4
File: datastore.py, Project: sleepy-owl/refnx
    def __init__(self):
        super(DataStore, self).__init__()
        self.data_objects = OrderedDict()

        # create the default theoretical dataset
        q = np.linspace(0.005, 0.5, 1000)
        r = np.empty_like(q)
        dataset = ReflectDataset()
        dataset.data = (q, r)
        dataset.name = "theoretical"
        air = SLD(0, name="fronting")
        sio2 = SLD(3.47, name="1")
        si = SLD(2.07, name="backing")
        structure = air(0, 0) | sio2(15, 3.0) | si(0, 3.0)

        structure[1].name = "slab"
        structure[1].thick.name = "thick"
        structure[1].rough.name = "rough"
        structure[1].sld.real.name = "sld"
        structure[1].sld.imag.name = "isld"
        structure[1].vfsolv.name = "vfsolv"

        model = ReflectModel(structure, name="theoretical")
        self.add(dataset)
        self["theoretical"].model = model
Code example #5
File: datastore.py, Project: brotwasme/refnx2019
    def __init__(self):
        super(DataStore, self).__init__()
        self.data_objects = OrderedDict()

        # create the default theoretical dataset
        q = np.linspace(0.005, 0.5, 1000)
        r = np.empty_like(q)
        dataset = ReflectDataset()
        dataset.data = (q, r)
        dataset.name = 'theoretical'
        air = SLD(0, name='fronting')
        sio2 = SLD(3.47, name='1')
        si = SLD(2.07, name='backing')
        structure = air(0, 0) | sio2(15, 3.) | si(0, 3.)

        structure[1].name = 'slab'
        structure[1].thick.name = 'thick'
        structure[1].rough.name = 'rough'
        structure[1].sld.real.name = 'sld'
        structure[1].sld.imag.name = 'isld'
        structure[1].vfsolv.name = 'vfsolv'

        model = ReflectModel(structure, name='theoretical')
        self.add(dataset)
        self['theoretical'].model = model
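Given the `self.add(dataset)` and `self['theoretical']` calls above, `DataStore` evidently supports name-based lookup. A small sketch of retrieving the default entry, assuming that mapping interface:

    store = DataStore()
    data_object = store['theoretical']    # the default theoretical entry
    print(data_object.model.structure)    # fronting | slab | backing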
Code example #6
 def test_load_dat_with_header(self):
     # check that the file load works with a header
     a = ReflectDataset(os.path.join(self.pth, 'c_PLP0000708.dat'))
     b = ReflectDataset(os.path.join(self.pth, 'c_PLP0000708_header.dat'))
     c = ReflectDataset(os.path.join(self.pth, 'c_PLP0000708_header2.dat'))
     assert_equal(len(a), len(b))
     assert_equal(len(a), len(c))
Code example #7
    def setUp(self):
        data = ReflectDataset()

        x1 = np.linspace(0, 10, 5)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data
Code example #8
    def test_construction(self):
        # test we can construct a dataset directly from a file.
        pth = os.path.join(self.pth, 'c_PLP0000708.xml')

        ReflectDataset(pth)

        with open(os.path.join(self.pth, 'c_PLP0000708.xml')) as f:
            ReflectDataset(f)

        ReflectDataset(os.path.join(self.pth, 'd_a.txt'))
Code example #9
File: test_reflectdataset.py, Project: refnx/refnx
    def setUp(self):
        data = ReflectDataset()

        x1 = np.linspace(0, 10, 5)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data

        self.cwd = os.getcwd()
        self.tmpdir = TemporaryDirectory()
        os.chdir(self.tmpdir.name)
Code example #10
    def setup_method(self, tmpdir):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        data = ReflectDataset()

        x1 = np.linspace(0, 10, 115)
        y1 = 2 * x1
        e1 = np.ones_like(x1)
        dx1 = np.ones_like(x1)
        data.add_data((x1, y1, e1, dx1))
        self.data = data

        self.cwd = os.getcwd()
        self.tmpdir = tmpdir.strpath
        os.chdir(self.tmpdir)
Code example #11
File: tof_simulator.py, Project: igresh/refnx-models
    def reflectivity(self):
        """
        The reflectivity of the sampled system
        """
        rerr = np.sqrt(self.reflected_beam)
        ierr = np.sqrt(self.direct_beam)
        dx = np.sqrt((self.dlambda)**2 + self.dtheta**2 + self.rebin**2)

        ref, rerr = ErrorProp.EPdiv(self.reflected_beam, rerr,
                                    self.direct_beam, ierr)
        dataset = ReflectDataset(data=(self.q, ref, rerr, dx * self.q))

        # apply some counting statistics on top of dataset otherwise there will
        # be no variation at e.g. critical edge.
        return dataset.synthesise()
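`ErrorProp.EPdiv` propagates uncertainty through a division: for r = a/b, first-order propagation gives dr = |r| * sqrt((da/a)**2 + (db/b)**2). A hand-rolled equivalent for reference (a sketch, not the refnx implementation):

    import numpy as np

    def epdiv_sketch(a, da, b, db):
        # first-order error propagation for r = a / b
        r = a / b
        dr = np.abs(r) * np.sqrt((da / a) ** 2 + (db / b) ** 2)
        return r, dr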
Code example #12
    def test_reduction_method(self):

        # a quick smoke test to check that the reduction can occur
        # warnings filter for pixel size
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)

            a = PlatypusReduce("PLP0000711.nx.hdf", data_folder=self.pth)

            # try reduction with the reduce method
            a.reduce(
                "PLP0000708.nx.hdf",
                data_folder=self.pth,
                rebin_percent=4,
            )

            # try reduction with the __call__ method
            a(
                "PLP0000708.nx.hdf",
                data_folder=self.pth,
                rebin_percent=4,
            )

            # this should also have saved a couple of files in the current
            # directory
            assert os.path.isfile("./PLP0000708_0.dat")
            assert os.path.isfile("./PLP0000708_0.xml")

            # can we read the file
            ReflectDataset("./PLP0000708_0.dat")

            # try writing offspecular data
            a.write_offspecular("offspec.xml", 0)
Code example #13
File: mpi_parallelisation.py, Project: llimeht/refnx
def setup():
    # load the data.
    DATASET_NAME = os.path.join(refnx.__path__[0],
                                'analysis',
                                'test',
                                'c_PLP0011859_q.txt')

    # load the data
    data = ReflectDataset(DATASET_NAME)

    # the materials we're using
    si = SLD(2.07, name='Si')
    sio2 = SLD(3.47, name='SiO2')
    film = SLD(2, name='film')
    d2o = SLD(6.36, name='d2o')

    structure = si | sio2(30, 3) | film(250, 3) | d2o(0, 3)
    structure[1].thick.setp(vary=True, bounds=(15., 50.))
    structure[1].rough.setp(vary=True, bounds=(1., 6.))
    structure[2].thick.setp(vary=True, bounds=(200, 300))
    structure[2].sld.real.setp(vary=True, bounds=(0.1, 3))
    structure[2].rough.setp(vary=True, bounds=(1, 6))

    model = ReflectModel(structure, bkg=9e-6, scale=1.)
    model.bkg.setp(vary=True, bounds=(1e-8, 1e-5))
    model.scale.setp(vary=True, bounds=(0.9, 1.1))
    model.threads = 1
    # fit on a logR scale, but use weighting
    objective = Objective(model, data, transform=Transform('logY'),
                          use_weights=True)

    return objective
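A minimal sketch of how the objective returned by `setup()` might be sampled. This version is serial; the original mpi_parallelisation.py presumably hands the sampling to an MPI pool instead:

    # walker and step counts are illustrative, not from the original script
    objective = setup()
    fitter = CurveFitter(objective, nwalkers=100)
    fitter.initialise('jitter')
    fitter.sample(50)
    print(objective)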
Code example #14
    def setup_method(self):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        self.si = SLD(2.07, name='Si')
        self.sio2 = SLD(3.47, name='SiO2')
        self.d2o = SLD(6.36, name='d2o')
        self.h2o = SLD(-0.56, name='h2o')
        self.cm3 = SLD(3.5, name='cm3')
        self.polymer = SLD(2, name='polymer')

        self.sio2_l = self.sio2(40, 3)
        self.polymer_l = self.polymer(200, 3)

        self.structure = (self.si | self.sio2_l | self.polymer_l
                          | self.d2o(0, 3))

        fname = os.path.join(self.pth, 'c_PLP0011859_q.txt')

        self.dataset = ReflectDataset(fname)
        self.model = ReflectModel(self.structure, bkg=2e-7)
        self.objective = Objective(self.model,
                                   self.dataset,
                                   use_weights=False,
                                   transform=Transform('logY'))
        self.global_objective = GlobalObjective([self.objective])
Code example #15
    def test_loading_junk(self):
        # if you can't load anything from a datafile then you should get a
        # RuntimeError raised.
        from pytest import raises

        with raises(RuntimeError):
            ReflectDataset(os.path.join(self.pth, "../__init__.py"))
Code example #16
    def test_repr(self):
        a = Data1D(os.path.join(self.pth, "c_PLP0033831.txt"))
        b = eval(repr(a))
        assert_equal(len(a), 166)
        assert_equal(len(b), 166)

        # load dataset from XML, via string
        a = ReflectDataset(os.path.join(self.pth, "c_PLP0000708.xml"))
        b = eval(repr(a))
        assert_equal(len(b), len(a))
Code example #17
    def splice_datasets(self, ds):
        """
        Combines datasets together.

        Parameters
        ----------
        ds: list of `refnx.dataset.Data1D`
            The datasets to splice together.

        Returns
        -------
        fname: str
            The name of the combined dataset

        Notes
        -----
        The combined dataset is saved as `f"c_{d.filename}.dat"`,
        where d is the dataset with the lowest average Q value
        from ds.
        """
        appended_ds = ReflectDataset()

        datasets = []
        average_q = []
        for d in ds:
            dataset = ReflectDataset(d)
            average_q.append(np.mean(dataset.x))
            datasets.append(dataset)

        idxs = np.argsort(average_q)
        # sort datasets according to average Q (argsort gives the
        # permutation that orders average_q).
        datasets = [datasets[i] for i in idxs]

        for dataset in datasets:
            appended_ds += dataset

        # use splitext rather than str.rstrip(".dat"); rstrip removes a
        # trailing *character set* and can eat legitimate filename characters.
        fname = os.path.splitext(datasets[0].filename)[0]
        fname = fname.split("_")[0]
        fname = f"c_{fname}.dat"
        appended_ds.save(fname)
        return fname
Code example #18
    def load_data(self, data):
        """
        Load a dataset into the `Motofit` instance.

        Parameters
        ----------
        data: refnx.dataset.Data1D, or str, or file-like
        """
        if isinstance(data, ReflectDataset):
            self.dataset = data
        else:
            self.dataset = ReflectDataset(data)

        self.dataset_name.value = self.dataset.name

        # loading a dataset changes the objective and curvefitter
        self._update_analysis_objects()

        self.qmin = np.min(self.dataset.x)
        self.qmax = np.max(self.dataset.x)
        if self.fig is not None:
            yt, et = self.transform(self.dataset.x, self.dataset.y)

            if self.data_plot is None:
                (self.data_plot, ) = self.ax_data.plot(
                    self.dataset.x,
                    yt,
                    label=self.dataset.name,
                    ms=2,
                    marker="o",
                    ls="",
                    zorder=1,
                )
                self.data_plot.set_label(self.dataset.name)
                self.ax_data.legend()

                # no need to calculate residuals here, that'll be updated in
                # the redraw method
                (self.residuals_plot, ) = self.ax_residual.plot(self.dataset.x)
            else:
                self.data_plot.set_xdata(self.dataset.x)
                self.data_plot.set_ydata(yt)

            # calculate theoretical model over same range as data
            # use redraw over update_model because it ensures chi2 widget gets
            # displayed
            self.redraw(None)
            self.ax_data.relim()
            self.ax_data.autoscale_view()
            self.ax_residual.relim()
            self.ax_residual.autoscale_view()
            self.fig.canvas.draw()
Code example #19
    def __load_data(self, file_path):
        """Loads a reflectivity dataset from a given file path and applies scaling.

        Args:
            file_path (string): path to the file containing the data to model.

        Returns:
            refnx.dataset.ReflectDataset: the scaled dataset with any
                zero-valued points removed.

        """
        data = ReflectDataset(file_path)  # load the data the model is designed for
        self.filename = os.path.basename(data.filename)
        data.scale(np.max(data.data[1]))  # normalise y and y_err by the maximum reflectivity

        x, y, y_err = data.x.tolist(), data.y.tolist(), data.y_err.tolist()
        removed = []  # points containing zero values cause NaNs when fitting
        for i in range(len(x)):
            if x[i] == 0 or y[i] == 0 or y_err[i] == 0:
                removed.append(i)

        # remove the identified points and return the processed dataset
        x     = np.delete(np.array(x),     removed)
        y     = np.delete(np.array(y),     removed)
        y_err = np.delete(np.array(y_err), removed)
        data_new = np.array([x, y, y_err])
        return ReflectDataset(data_new)
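The point-removal loop above can be collapsed into a single boolean mask. A vectorised sketch of the same filtering (not part of the original file):

    keep = (data.x != 0) & (data.y != 0) & (data.y_err != 0)
    data_new = np.array([data.x[keep], data.y[keep], data.y_err[keep]])
    return ReflectDataset(data_new)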
Code example #20
File: test_objective.py, Project: brotwasme/refnx
    def test_transform(self):
        pth = os.path.dirname(os.path.abspath(__file__))

        fname = os.path.join(pth, 'c_PLP0011859_q.txt')
        data = ReflectDataset(fname)
        t = Transform('logY')

        yt, et = t(data.x, data.y, y_err=data.y_err)
        assert_equal(yt, np.log10(data.y))

        yt, _ = t(data.x, data.y, y_err=None)
        assert_equal(yt, np.log10(data.y))

        EPy, EPe = EP.EPlog10(data.y, data.y_err)
        assert_equal(yt, EPy)
        assert_equal(et, EPe)
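For the 'logY' transform the expected values can be checked by hand: yt = log10(y), and first-order error propagation gives et = y_err / (y ln 10). A sketch:

    y, y_err = 0.01, 0.001
    yt = np.log10(y)                 # -2.0
    et = y_err / (y * np.log(10))    # propagated uncertainty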
Code example #21
File: test__code_fragment.py, Project: igresh/refnx
    def test_code_fragment(self):
        e361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))

        si = SLD(2.07, name="Si")
        sio2 = SLD(3.47, name="SiO2")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(1, name="polymer")

        # e361 is an older dataset, but well characterised
        self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        self.model361 = ReflectModel(self.structure361, bkg=2e-5)

        self.model361.scale.vary = True
        self.model361.bkg.vary = True
        self.model361.scale.range(0.1, 2)
        self.model361.bkg.range(0, 5e-5)

        # d2o
        self.structure361[-1].sld.real.vary = True
        self.structure361[-1].sld.real.range(6, 6.36)

        self.structure361[1].thick.vary = True
        self.structure361[1].thick.range(5, 20)

        self.structure361[2].thick.vary = True
        self.structure361[2].thick.range(100, 220)

        self.structure361[2].sld.real.vary = True
        self.structure361[2].sld.real.range(0.2, 1.5)

        objective = Objective(self.model361, e361, transform=Transform("logY"))
        objective2 = eval(repr(objective))
        assert_allclose(objective2.chisqr(), objective.chisqr())

        exec(repr(objective))
        exec(code_fragment(objective))

        # artificially link the two thicknesses together
        # check that we can reproduce the objective from the repr
        self.structure361[2].thick.constraint = self.structure361[1].thick
        fragment = code_fragment(objective)
        fragment = fragment + "\nobj = objective()\nresult = obj.chisqr()"
        d = {}
        # need to provide the globals dictionary to exec, so it can see imports
        # e.g. https://bit.ly/2RFOF7i (from stackoverflow)
        exec(fragment, globals(), d)
        assert_allclose(d["result"], objective.chisqr())
Code example #22
    def setup_method(self):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        sio2 = SLD(3.47, name="SiO2")
        air = SLD(0, name="air")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(1, name="polymer")

        self.structure = air | sio2(100, 2) | si(0, 3)

        theoretical = np.loadtxt(os.path.join(self.pth, "theoretical.txt"))
        qvals, rvals = np.hsplit(theoretical, 2)
        self.qvals = qvals.flatten()
        self.rvals = rvals.flatten()

        # e361 is an older dataset, but well characterised
        self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        self.model361 = ReflectModel(self.structure361, bkg=2e-5)

        self.model361.scale.vary = True
        self.model361.bkg.vary = True
        self.model361.scale.range(0.1, 2)
        self.model361.bkg.range(0, 5e-5)

        # d2o
        self.structure361[-1].sld.real.vary = True
        self.structure361[-1].sld.real.range(6, 6.36)

        self.structure361[1].thick.vary = True
        self.structure361[1].thick.range(5, 20)

        self.structure361[2].thick.vary = True
        self.structure361[2].thick.range(100, 220)

        self.structure361[2].sld.real.vary = True
        self.structure361[2].sld.real.range(0.2, 1.5)

        self.e361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))
        self.qvals361, self.rvals361, self.evals361 = (
            self.e361.x,
            self.e361.y,
            self.e361.y_err,
        )
        self.app = Motofit()
        self.app(self.e361, model=self.model361)
Code example #23
    def plot(self, files_to_display=False):
        """
        Parameters
        ----------
        files_to_display : sequence of str
            filenames to display in the plot window
        """
        if not files_to_display:
            files = QtWidgets.QFileDialog.getOpenFileNames(
                self,
                "Select reflectometry data files to plot",
                directory=self.data_directory,
                filter="Reflectometry files (*.xml *.dat)",
            )
            files_to_display = files[0]

        if not files_to_display:
            # could've cancelled the file dialogue
            return

        # instead of ax.hold(False)
        self.figure.clear()

        # create an axis
        self.ax = self.figure.add_subplot(111)

        displayed = {}

        # load each file and display it
        for file in files_to_display:
            dataset = ReflectDataset(file)

            # plot data
            line = self.ax.errorbar(*dataset.data[0:3], label=dataset.name)

            displayed[dataset.name] = (dataset, line)

        # add legend and plot log-lin
        self.ax.legend()
        self.ax.set_yscale("log")

        self.files_displayed = displayed

        # refresh canvas
        self.canvas.draw()
Code example #24
File: test_reflect.py, Project: jfkcooper/refnx
    def test_resolution_speed_comparator(self):
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        dataset = ReflectDataset(fname)

        sio2 = SLD(3.47, name="SiO2")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(2.0, name="polymer")

        sio2_l = sio2(30, 3)
        polymer_l = polymer(125, 3)

        dx = dataset.x_err
        structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)
        model = ReflectModel(structure, bkg=2e-6, dq_type="constant")
        objective = Objective(model,
                              dataset,
                              use_weights=False,
                              transform=Transform("logY"))

        # check that choose_resolution_approach doesn't change state
        # of model
        fastest_method = choose_dq_type(objective)
        assert model.dq_type == "constant"
        assert_equal(dx, objective.data.x_err)

        # check that the comparison worked
        const_time = time.time()
        for i in range(1000):
            objective.generative()
        const_time = time.time() - const_time

        model.dq_type = "pointwise"
        point_time = time.time()
        for i in range(1000):
            objective.generative()
        point_time = time.time() - point_time

        if fastest_method == "pointwise":
            assert point_time < const_time
        elif fastest_method == "constant":
            assert const_time < point_time

        # check that we could use the function to setup a reflectmodel
        ReflectModel(structure, bkg=2e-6, dq_type=choose_dq_type(objective))
Code example #25
File: test_reflect.py, Project: jfkcooper/refnx
    def test_modelvals_degenerate_layers(self):
        # try fitting dataset with a deposited layer split into two degenerate
        # layers
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        dataset = ReflectDataset(fname)

        sio2 = SLD(3.47, name="SiO2")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(2.0, name="polymer")

        sio2_l = sio2(30, 3)
        polymer_l = polymer(125, 3)

        structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)

        polymer_l.thick.setp(value=125, vary=True, bounds=(0, 250))
        polymer_l.rough.setp(value=4, vary=True, bounds=(0, 8))
        structure[-1].rough.setp(vary=True, bounds=(0, 6))
        sio2_l.rough.setp(value=3.16, vary=True, bounds=(0, 8))

        model = ReflectModel(structure, bkg=2e-6)
        objective = Objective(model,
                              dataset,
                              use_weights=False,
                              transform=Transform("logY"))

        model.scale.setp(vary=True, bounds=(0, 2))
        model.bkg.setp(vary=True, bounds=(0, 8e-6))

        slabs = structure.slabs()
        assert_equal(slabs[2, 0:2], slabs[3, 0:2])
        assert_equal(slabs[2, 3], slabs[3, 3])
        assert_equal(slabs[1, 3], sio2_l.rough.value)

        f = CurveFitter(objective)
        f.fit(method="differential_evolution", seed=1, maxiter=3)

        slabs = structure.slabs()
        assert_equal(slabs[2, 0:2], slabs[3, 0:2])
        assert_equal(slabs[2, 3], slabs[3, 3])
Code example #26
 def test_load(self):
     # test reflectivity calculation with values generated from Motofit
     dataset = ReflectDataset()
     with open(os.path.join(path, 'c_PLP0000708.xml')) as f:
         dataset.load(f)
     
     assert_equal(dataset.npoints, 90)
     assert_equal(90, np.size(dataset.x))
     
     dataset1 = ReflectDataset()
     with open(os.path.join(path, 'c_PLP0000708.dat')) as f:
         dataset1.load(f)
     
     assert_equal(dataset1.npoints, 90)
     assert_equal(90, np.size(dataset1.x))
Code example #27
    def reflectivity(self):
        """
        The reflectivity of the sampled system
        """
        rerr = np.sqrt(self.reflected_beam)
        bmon_reflect_err = np.sqrt(self.bmon_reflect)

        ierr = np.sqrt(self.direct_beam)
        bmon_direct_err = np.sqrt(self.bmon_direct)

        dx = np.sqrt(
            (self.dlambda) ** 2 + self.dtheta ** 2 + (0.68 * self.rebin) ** 2
        )
        dx *= self.q

        # divide reflectivity signal by bmon
        ref, rerr = ErrorProp.EPdiv(
            self.reflected_beam, rerr, self.bmon_reflect, bmon_reflect_err
        )
        # divide direct signal by bmon
        direct, ierr = ErrorProp.EPdiv(
            self.direct_beam, ierr, self.bmon_direct, bmon_direct_err
        )

        # now calculate reflectivity
        ref, rerr = ErrorProp.EPdiv(ref, rerr, direct, ierr)

        # filter points with zero counts because error is incorrect
        mask = rerr != 0

        dataset = ReflectDataset(
            data=(self.q[mask], ref[mask], rerr[mask], dx[mask])
        )

        # apply some counting statistics on top of dataset otherwise there will
        # be no variation at e.g. critical edge.
        # return dataset.synthesise()
        return dataset
Code example #28
File: datastore.py, Project: brotwasme/refnx2019
 def load(self, filename):
     dataset = ReflectDataset(filename)
     data_object = self.add(dataset)
     return data_object
Code example #29
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)` that is called with the current
            percentage progress of the reduction
        """

        # refnx.reduce.reduce needs you to be in the directory where you're
        # going to write files to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = "./"

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't mentioned then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            self.streamed_directory = data_directory

        logging.info("-------------------------------------------------------"
                     "\nStarting reduction run")
        logging.info(
            "data_folder={data_directory}, trim_trailing=True, "
            "lo_wavelength={low_wavelength}, "
            "hi_wavelength={high_wavelength}, "
            "rebin_percent={rebin_percent}, "
            "normalise={monitor_normalisation}, "
            "background={background_subtraction} "
            "eventmode={streamed_reduction} "
            "event_folder={streamed_directory}".format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start, self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # are you manual beamfinding?
        peak_pos = None
        if self.manual_beam_find and self.manual_beam_finder is not None:
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val["use"]:
                continue

            flood = None
            if val["flood"]:
                flood = full_path(val["flood"])

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(
                ["reflect-1", "reflect-2", "reflect-3"],
                ["direct-1", "direct-2", "direct-3"],
            ):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if (not os.path.isfile(full_path(reflect))) or (
                        not os.path.isfile(full_path(direct))):
                    continue

                # which of the nspectra to reduce (or all)
                ref_pn = PlatypusNexus(full_path(reflect))

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct, data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                try:
                    reduced = reducer(
                        ref_pn,
                        scale=val["scale"],
                        h5norm=flood,
                        lo_wavelength=self.low_wavelength,
                        hi_wavelength=self.high_wavelength,
                        rebin_percent=self.rebin_percent,
                        normalise=self.monitor_normalisation,
                        background=self.background_subtraction,
                        manual_beam_find=self.manual_beam_finder,
                        peak_pos=peak_pos,
                        eventmode=eventmode,
                        event_folder=streamed_directory,
                    )
                except Exception as e:
                    # typical Exception would be ValueError for non overlapping
                    # angles
                    logging.info(e)
                    continue

                logging.info("Reduced {} vs {}, scale={}, angle={}".format(
                    reflect,
                    direct,
                    val["scale"],
                    reduced[1]["omega"][0, 0],
                ))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             "c_{0}.dat".format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             "c_{0}.xml".format(fname))

                try:
                    combined_dataset.add_data(
                        reducer.data(),
                        requires_splice=True,
                        trim_trailing=True,
                    )
                except ValueError as e:
                    # datasets don't overlap
                    logging.info(e)
                    continue

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, "wb") as f:
                    combined_dataset.save(f)
                with open(fname_xml, "wb") as f:
                    combined_dataset.save_xml(f)
                logging.info("Written combined files: {} and {}".format(
                    fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info("\nFinished reduction run"
                     "-------------------------------------------------------")
Code example #30
        else:
            co = 15
        sim = md.MDSimulation(traj_dir + '_frame{}.pdb'.format(i), flip=True,
                              verbose=True, layer_thickness=lt, roughness=rough)

        sim.assign_scattering_lengths('neutron', atom_types=lgts[0], scattering_lengths=lgts[1])

        sim.run()
        layers_to_cut = int(co / lt) + 1
        timesteps += sim.layers.shape[0]
        l = np.append(l, sim.layers[:, :-layers_to_cut, :])

    n = l.reshape(timesteps, sim.layers.shape[1]-layers_to_cut, sim.layers.shape[2])

    data_dir = '../data/reflectometry2/dspc_{}/'.format(surface_pressure)
    dataset = ReflectDataset(os.path.join(data_dir, '{}{}.dat'.format(contrast, surface_pressure)))

    refy = np.zeros((n.shape[0], dataset.x.size))
    sldy = []
    chi = np.zeros((n.shape[0]))
    print(n.shape[0])
    for i in range(n.shape[0]):
        sim.av_layers = n[i, :, :]
        model = ReflectModel(sim)
        model.scale.setp(1, vary=True, bounds=(0.00000001, np.inf))
        model.bkg.setp(dataset.y[-1], vary=False)
        objective = Objective(model, dataset, transform=Transform('YX4'))
        fitter = CurveFitter(objective)
        res = fitter.fit()
        refy[i] = model(dataset.x, x_err=dataset.x_err)*(dataset.x)**4
        sldy.append(sim.sld_profile()[1])
Code example #31
File: reduce.py, Project: brotwasme/refnx2019
def reduce_stitch(reflect_list,
                  direct_list,
                  background_list=None,
                  norm_file_num=None,
                  data_folder=None,
                  prefix='PLP',
                  trim_trailing=True,
                  save=True,
                  **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    background_list : list, optional
        List of `bool` to control whether background subtraction is used
        for each reduction, e.g. `[False, True, True]`. The default is to do
        a background subtraction on all runs.
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Where is the raw data stored?
    prefix : str, optional
        The instrument filename prefix.
    trim_trailing : bool, optional
        When datasets are spliced together do you want to remove points in the
        overlap region from the preceding dataset?
    save : bool, optional
        If `True` then the spliced file is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.

    Notes
    -----
    If `background` is in the supplied `kwds` it is ignored.
    The `prefix` is used to specify the run numbers to a filename.
    For example a run number of 10, and a prefix of `PLP` resolves to a
    NeXus filename of 'PLP0000010.nx.hdf'.

    Examples
    --------

    >>> from refnx.reduce import reduce_stitch
    >>> dataset, fname = reduce_stitch([708, 709, 710],
    ...                                [711, 711, 711],
    ...                                 rebin_percent=2)

    """

    scale = kwds.get('scale', 1.)

    kwds_copy = {}
    kwds_copy.update(kwds)
    kwds_copy.pop('background', None)

    if not background_list:
        background_list = [True] * len(reflect_list)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list, background_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num, prefix=prefix)
        kwds['h5norm'] = norm_datafile

    if prefix == 'PLP':
        reducer_klass = PlatypusReduce
    else:
        raise ValueError("Incorrect prefix specified")

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0], prefix=prefix))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1], prefix=prefix))

        reducer = reducer_klass(direct_datafile)
        datasets, fnames = reducer.reduce(reflect_datafile,
                                          save=save,
                                          background=val[2],
                                          **kwds_copy)

        if not index:
            datasets[0].scale(scale)

        combined_dataset.add_data(datasets[0].data,
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None

    if save:
        # this will give us <fname>.nx.hdf
        # if reflect_list was an integer you'll get PLP0000708.nx.hdf
        fname = number_datafile(reflect_list[0], prefix=prefix)
        # now chop off .nx.hdf extension
        fname = basename_datafile(fname)

        fname_dat = 'c_{0}.dat'.format(fname)
        with open(fname_dat, 'wb') as f:
            combined_dataset.save(f)
        fname_xml = 'c_{0}.xml'.format(fname)
        with open(fname_xml, 'wb') as f:
            combined_dataset.save_xml(f)

    return combined_dataset, fname_dat
Code example #32
File: reduce.py, Project: brotwasme/refnx2019
    def _reduce_single_angle(self, scale=1):
        """
        Reduce a single angle.
        """
        n_spectra = self.reflected_beam.n_spectra
        n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
        n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

        # calculate omega and two_theta depending on the mode.
        mode = self.reflected_beam.mode

        # we'll need the wavelengths to calculate Q.
        wavelengths = self.reflected_beam.m_lambda
        m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

        detector_z_difference = (self.reflected_beam.detector_z -
                                 self.direct_beam.detector_z)

        beampos_z_difference = (self.reflected_beam.m_beampos -
                                self.direct_beam.m_beampos)

        Y_PIXEL_SPACING = self.reflected_beam.cat.y_pixels_per_mm[0]

        total_z_deflection = (detector_z_difference +
                              beampos_z_difference * Y_PIXEL_SPACING)

        if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
            # omega_nom.shape = (N, )
            omega_nom = np.degrees(
                np.arctan(total_z_deflection / self.reflected_beam.detector_y)
                / 2.)
            '''
            Wavelength specific angle of incidence correction
            This involves:
            1) working out the trajectory of the neutrons through the
            collimation system.
            2) where those neutrons intersect the sample.
            3) working out the elevation of the neutrons when they hit the
            sample.
            4) correcting the angle of incidence.
            '''
            speeds = general.wavelength_velocity(wavelengths)
            collimation_distance = self.reflected_beam.cat.collimation_distance
            s2_sample_distance = (self.reflected_beam.cat.sample_distance -
                                  self.reflected_beam.cat.slit2_distance)

            # work out the trajectories of the neutrons for them to pass
            # through the collimation system.
            trajectories = find_trajectory(collimation_distance / 1000., 0,
                                           speeds)

            # work out where the beam hits the sample
            res = parabola_line_intersection_point(s2_sample_distance / 1000,
                                                   0, trajectories, speeds,
                                                   omega_nom[:, np.newaxis])
            intersect_x, intersect_y, x_prime, elevation = res

            # correct the angle of incidence with a wavelength dependent
            # elevation.
            omega_corrected = omega_nom[:, np.newaxis] - elevation

            m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
            m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
            m_twotheta *= Y_PIXEL_SPACING
            m_twotheta += detector_z_difference
            m_twotheta /= (self.reflected_beam.detector_y[:, np.newaxis,
                                                          np.newaxis])
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta = np.degrees(m_twotheta)

            # you may be reflecting upside down, reverse the sign.
            upside_down = np.sign(omega_corrected[:, 0])
            m_twotheta *= upside_down[:, np.newaxis, np.newaxis]
            omega_corrected *= upside_down[:, np.newaxis]

        elif mode in ['SB', 'DB']:
            # the angle of incidence is half the two theta of the reflected
            # beam
            omega = np.arctan(
                total_z_deflection / self.reflected_beam.detector_y) / 2.

            # work out two theta for each of the detector pixels
            m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
            m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
            m_twotheta += detector_z_difference
            m_twotheta -= (
                self.reflected_beam.detector_y[:, np.newaxis, np.newaxis] *
                np.tan(omega[:, np.newaxis, np.newaxis]))

            m_twotheta /= (self.reflected_beam.detector_y[:, np.newaxis,
                                                          np.newaxis])
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta += omega[:, np.newaxis, np.newaxis]

            # still in radians at this point
            # add an extra dimension, because omega_corrected needs to be the
            # angle of incidence for each wavelength. I.e. should be
            # broadcastable to (N, T)
            omega_corrected = np.degrees(omega)[:, np.newaxis]
            m_twotheta = np.degrees(m_twotheta)
        '''
        --Specular Reflectivity--
        Use the (constant wavelength) spectra that have already been integrated
        over 2theta (in processnexus) to calculate the specular reflectivity.
        Beware: this is because m_topandtail has already been divided through
        by monitor counts and error propagated (at the end of processnexus).
        Thus, the 2theta pixels are correlated to some degree. If we use the 2D
        plot to calculate reflectivity
        (sum {Iref_{2theta, lambda}}/I_direct_{lambda}) then the error bars in
        the reflectivity turn out much larger than they should be.
        '''
        ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                                   self.reflected_beam.m_spec_sd,
                                   self.direct_beam.m_spec,
                                   self.direct_beam.m_spec_sd)

        # calculate the 1D Qz values.
        xdata = general.q(omega_corrected, wavelengths)
        xdata_sd = (self.reflected_beam.m_lambda_fwhm /
                    self.reflected_beam.m_lambda)**2
        xdata_sd += (self.reflected_beam.domega[:, np.newaxis] /
                     omega_corrected)**2
        xdata_sd = np.sqrt(xdata_sd) * xdata
        '''
        ---Offspecular reflectivity---
        normalise the counts in the reflected beam by the direct beam
        spectrum this gives a reflectivity. Also propagate the errors,
        leaving the fractional variance (dr/r)^2.
        --Note-- that adjacent y-pixels (same wavelength) are correlated in
        this treatment, so you can't just sum over them.
        i.e. (c_0 / d) + ... + c_n / d) != (c_0 + ... + c_n) / d
        '''
        m_ref, m_ref_sd = EP.EPdiv(
            self.reflected_beam.m_topandtail,
            self.reflected_beam.m_topandtail_sd,
            self.direct_beam.m_spec[:, :, np.newaxis],
            self.direct_beam.m_spec_sd[:, :, np.newaxis])

        # you may have had divide by zero's.
        m_ref = np.where(np.isinf(m_ref), 0, m_ref)
        m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

        # calculate the Q values for the detector pixels.  Each pixel has
        # different 2theta and different wavelength, ASSUME that they have the
        # same angle of incidence
        qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis], m_twotheta,
                                0, wavelengths[:, :, np.newaxis])

        reduction = {}
        reduction['x'] = self.x = xdata
        reduction['x_err'] = self.x_err = xdata_sd
        reduction['y'] = self.y = ydata / scale
        reduction['y_err'] = self.y_err = ydata_sd / scale
        reduction['omega'] = omega_corrected
        reduction['m_twotheta'] = m_twotheta
        reduction['m_ref'] = self.m_ref = m_ref
        reduction['m_ref_err'] = self.m_ref_err = m_ref_sd
        reduction['qz'] = self.m_qz = qz
        reduction['qx'] = self.m_qx = qx
        reduction['nspectra'] = self.n_spectra = n_spectra
        reduction['start_time'] = self.reflected_beam.start_time
        reduction['datafile_number'] = self.datafile_number = (
            self.reflected_beam.datafile_number)

        fnames = []
        datasets = []
        datafilename = self.reflected_beam.datafilename
        datafilename = os.path.basename(datafilename.split('.nx.hdf')[0])

        for i in range(n_spectra):
            data_tup = self.data(scanpoint=i)
            datasets.append(ReflectDataset(data_tup))

        if self.save:
            for i, dataset in enumerate(datasets):
                fname = '{0}_{1}.dat'.format(datafilename, i)
                fnames.append(fname)
                with open(fname, 'wb') as f:
                    dataset.save(f)

                fname = '{0}_{1}.xml'.format(datafilename, i)
                with open(fname, 'wb') as f:
                    dataset.save_xml(f, start_time=reduction['start_time'][i])

        reduction['fname'] = fnames
        return datasets, deepcopy(reduction)
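`general.q` maps angle of incidence and wavelength to momentum transfer. The underlying relation is Q = 4*pi*sin(omega)/lambda; a sketch of the same calculation (not the refnx source):

    import numpy as np

    def q_sketch(omega_deg, wavelength):
        # omega in degrees, wavelength in angstroms
        return 4 * np.pi * np.sin(np.radians(omega_deg)) / wavelength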
Code example #33
    def test_multipledataset_corefinement(self):
        # test corefinement of three datasets
        data361 = ReflectDataset(os.path.join(self.pth, 'e361r.txt'))
        data365 = ReflectDataset(os.path.join(self.pth, 'e365r.txt'))
        data366 = ReflectDataset(os.path.join(self.pth, 'e366r.txt'))

        si = SLD(2.07, name='Si')
        sio2 = SLD(3.47, name='SiO2')
        d2o = SLD(6.36, name='d2o')
        h2o = SLD(-0.56, name='h2o')
        cm3 = SLD(3.47, name='cm3')
        polymer = SLD(1, name='polymer')

        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        structure365 = si | structure361[1] | structure361[2] | cm3(0, 3)
        structure366 = si | structure361[1] | structure361[2] | h2o(0, 3)

        structure365[-1].rough = structure361[-1].rough
        structure366[-1].rough = structure361[-1].rough

        structure361[1].thick.setp(vary=True, bounds=(0, 20))
        structure361[2].thick.setp(value=200., bounds=(200., 250.), vary=True)
        structure361[2].sld.real.setp(vary=True, bounds=(0, 2))
        structure361[2].vfsolv.setp(value=5., bounds=(0., 100.), vary=True)

        model361 = ReflectModel(structure361, bkg=2e-5)
        model365 = ReflectModel(structure365, bkg=2e-5)
        model366 = ReflectModel(structure366, bkg=2e-5)

        model361.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model365.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model366.bkg.setp(vary=True, bounds=(1e-6, 5e-5))

        objective361 = Objective(model361, data361)
        objective365 = Objective(model365, data365)
        objective366 = Objective(model366, data366)

        global_objective = GlobalObjective(
            [objective361, objective365, objective366])
        # are the right numbers of parameters varying?
        assert_equal(len(global_objective.varying_parameters()), 7)

        # can we set the parameters?
        global_objective.setp(np.array([1e-5, 10, 212, 1, 10, 1e-5, 1e-5]))

        f = CurveFitter(global_objective)
        f.fit()

        indiv_chisqr = np.sum(
            [objective.chisqr() for objective in global_objective.objectives])

        # the overall chi2 should be sum of individual chi2
        global_chisqr = global_objective.chisqr()
        assert_almost_equal(global_chisqr, indiv_chisqr)

        # now check that the parameters were held in common correctly.
        slabs361 = structure361.slabs()
        slabs365 = structure365.slabs()
        slabs366 = structure366.slabs()

        assert_equal(slabs365[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs366[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs365[-1, 3], slabs361[-1, 3])
        assert_equal(slabs366[-1, 3], slabs361[-1, 3])

        # check that the residuals are the correct lengths
        res361 = objective361.residuals()
        res365 = objective365.residuals()
        res366 = objective366.residuals()
        res_global = global_objective.residuals()
        assert_allclose(res_global[0:len(res361)], res361, rtol=1e-5)
        assert_allclose(res_global[len(res361):len(res361) + len(res365)],
                        res365,
                        rtol=1e-5)
        assert_allclose(res_global[len(res361) + len(res365):],
                        res366,
                        rtol=1e-5)

        repr(global_objective)
Code example #34
File: test_reflectdataset.py, Project: refnx/refnx
    def test_load(self):
        # load dataset from XML, via file handle
        dataset = ReflectDataset()
        with open(os.path.join(path, 'c_PLP0000708.xml')) as f:
            dataset.load(f)
        
        assert_equal(dataset.npoints, 90)
        assert_equal(90, np.size(dataset.x))

        # load dataset from XML, via string
        dataset = ReflectDataset()
        dataset.load(os.path.join(path, 'c_PLP0000708.xml'))

        assert_equal(dataset.npoints, 90)
        assert_equal(90, np.size(dataset.x))

        # load dataset from .dat, via file handle
        dataset1 = ReflectDataset()
        with open(os.path.join(path, 'c_PLP0000708.dat')) as f:
            dataset1.load(f)
        
        assert_equal(dataset1.npoints, 90)
        assert_equal(90, np.size(dataset1.x))

        # load dataset from .dat, via string
        dataset2 = ReflectDataset()
        dataset2.load(os.path.join(path, 'c_PLP0000708.dat'))

        assert_equal(dataset2.npoints, 90)
        assert_equal(90, np.size(dataset2.x))
Code example #35
File: model.py, Project: tjmurdoch/refnx
    def reducer(self, callback=None):
        """
        Reduce all the entries in reduction_entries

        Parameters
        ----------
        callback : callable
            Function, `f(percent_finished)` that is called with the current
            percentage progress of the reduction
        """

        # refnx.reduce.reduce needs you to be in the directory where you're
        # going to write files to
        if self.output_directory:
            os.chdir(self.output_directory)

        # if no data directory was specified then assume it's the cwd
        data_directory = self.data_directory
        if not data_directory:
            data_directory = './'

        def full_path(fname):
            f = os.path.join(data_directory, fname)
            return f

        # if the streamed directory isn't mentioned then assume it's the same
        # as the data directory
        streamed_directory = self.streamed_directory
        if not os.path.isdir(streamed_directory):
            self.streamed_directory = data_directory

        logging.info('-------------------------------------------------------'
                     '\nStarting reduction run')
        logging.info(
            'data_folder={data_directory}, trim_trailing=True, '
            'lo_wavelength={low_wavelength}, '
            'hi_wavelength={high_wavelength}, '
            'rebin_percent={rebin_percent}, '
            'normalise={monitor_normalisation}, '
            'background={background_subtraction} '
            'eventmode={streamed_reduction} '
            'event_folder={streamed_directory}'.format(**self.__dict__))

        # sets up time slices for event reduction
        if self.streamed_reduction:
            eventmode = np.arange(self.stream_start,
                                  self.stream_end,
                                  self.stream_duration)
            eventmode = np.r_[eventmode, self.stream_end]
        else:
            eventmode = None

        # are you manual beamfinding?
        peak_pos = None
        if (self.manual_beam_find and
                self.manual_beam_finder is not None):
            peak_pos = -1

        idx = 0

        cached_direct_beams = {}

        for row, val in self.reduction_entries.items():
            if not val['use']:
                continue

            flood = None
            if val['flood']:
                flood = val['flood']

            combined_dataset = None

            # process entries one by one
            for ref, db in zip(['reflect-1', 'reflect-2', 'reflect-3'],
                               ['direct-1', 'direct-2', 'direct-3']):
                reflect = val[ref]
                direct = val[db]

                # if the file doesn't exist there's no point continuing
                if ((not os.path.isfile(full_path(reflect))) or
                        (not os.path.isfile(full_path(direct)))):
                    continue

                # which of the nspectra to reduce (or all)
                ref_pn = PlatypusNexus(reflect)

                if direct not in cached_direct_beams:
                    cached_direct_beams[direct] = PlatypusReduce(
                        direct,
                        data_folder=data_directory)

                reducer = cached_direct_beams[direct]

                reduced = reducer(
                    ref_pn, scale=val['scale'],
                    norm_file_num=flood,
                    lo_wavelength=self.low_wavelength,
                    hi_wavelength=self.high_wavelength,
                    rebin_percent=self.rebin_percent,
                    normalise=self.monitor_normalisation,
                    background=self.background_subtraction,
                    manual_beam_find=self.manual_beam_finder,
                    peak_pos=peak_pos,
                    eventmode=eventmode,
                    event_folder=streamed_directory)

                logging.info(
                    'Reduced {} vs {}, scale={}, angle={}'.format(
                        reflect, direct, val['scale'],
                        reduced['omega'][0, 0]))

                if combined_dataset is None:
                    combined_dataset = ReflectDataset()

                    fname = basename_datafile(reflect)
                    fname_dat = os.path.join(self.output_directory,
                                             'c_{0}.dat'.format(fname))
                    fname_xml = os.path.join(self.output_directory,
                                             'c_{0}.xml'.format(fname))

                combined_dataset.add_data(reducer.data(),
                                          requires_splice=True,
                                          trim_trailing=True)

            if combined_dataset is not None:
                # after you've finished reducing write a combined file.
                with open(fname_dat, 'wb') as f:
                    combined_dataset.save(f)
                with open(fname_xml, 'wb') as f:
                    combined_dataset.save_xml(f)
                logging.info(
                    'Written combined files: {} and {}'.format(
                        fname_dat, fname_xml))

            # can be used to create a progress bar
            idx += 1
            if callback is not None:
                ok = callback(100 * idx / len(self.reduction_entries))
                if not ok:
                    break

        logging.info('\nFinished reduction run\n'
                     '-------------------------------------------------------')
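As an aside, the event-mode slicing set up near the top of this snippet is easy to check in isolation: `np.arange(start, end, duration)` yields the left edge of every time slice, and `np.r_` appends the final end-time so the last (possibly shorter) slice is kept. A minimal sketch, with hypothetical start/end/duration values:

import numpy as np

# hypothetical values: a one-hour acquisition cut into 15-minute slices
stream_start, stream_end, stream_duration = 0, 3600, 900
eventmode = np.r_[np.arange(stream_start, stream_end, stream_duration),
                  stream_end]
# eventmode -> array([   0,  900, 1800, 2700, 3600])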
Code example #37
File: reduce.py    Project: llimeht/refnx
def reduce_stitch(reflect_list, direct_list, norm_file_num=None,
                  data_folder=None, trim_trailing=True, save=True, **kwds):
    """
    Reduces a list of reflected beam run numbers and a list of corresponding
    direct beam run numbers from the Platypus reflectometer. If there are
    multiple reflectivity files they are spliced together.

    Parameters
    ----------
    reflect_list : list
        Reflected beam run numbers, e.g. `[708, 709, 710]`
        708 corresponds to the file PLP0000708.nx.hdf.
    direct_list : list
        Direct beam run numbers, e.g. `[711, 711, 711]`
    norm_file_num : int, optional
        The run number for the water flood field correction.
    data_folder : str, optional
        Where is the raw data stored?
    trim_trailing : bool, optional
        When datasets are spliced together do you want to remove points in the
        overlap region from the preceding dataset?
    save : bool, optional
        If `True` the spliced dataset is written to a file (in the working
        directory) with a name like: `c_PLP0000708.dat`.
    kwds : dict, optional
        Options passed directly to `refnx.reduce.platypusnexus.process`,
        for processing of individual spectra. Look at that method docstring
        for specification of options.

    Returns
    -------
    combined_dataset, reduced_filename : refnx.dataset.ReflectDataset, str
        The combined dataset and the file name of the reduced data, if it was
        saved. If it wasn't saved `reduced_filename` is `None`.
    """
    scale = kwds.get('scale', 1.)

    # now reduce all the files.
    zipped = zip(reflect_list, direct_list)

    combined_dataset = ReflectDataset()

    if data_folder is None:
        data_folder = os.getcwd()

    if norm_file_num:
        norm_datafile = number_datafile(norm_file_num)
        kwds['h5norm'] = norm_datafile

    for index, val in enumerate(zipped):
        reflect_datafile = os.path.join(data_folder,
                                        number_datafile(val[0]))
        direct_datafile = os.path.join(data_folder,
                                       number_datafile(val[1]))

        reduced = ReducePlatypus(direct_datafile,
                                 reflect=reflect_datafile,
                                 save=save,
                                 **kwds)
        if not index:
            reduced.scale(scale)

        combined_dataset.add_data(reduced.data(),
                                  requires_splice=True,
                                  trim_trailing=trim_trailing)

    fname_dat = None
    if save:
        fname_dat = 'c_PLP{0:07d}.dat'.format(reflect_list[0])
        with open(fname_dat, 'wb') as f:
            combined_dataset.save(f)
        fname_xml = 'c_PLP{0:07d}.xml'.format(reflect_list[0])
        with open(fname_xml, 'wb') as f:
            combined_dataset.save_xml(f)

    # return the .dat filename, as documented above
    return combined_dataset, fname_dat
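A minimal usage sketch for `reduce_stitch`, assuming the relevant PLP*.nx.hdf files are in the working directory (the run numbers below are the hypothetical ones from the docstring, and `rebin_percent` is one of the processing options passed through via `kwds`):

from refnx.reduce import reduce_stitch

# three angles of one sample, each normalised by direct beam run 711
dataset, fname = reduce_stitch([708, 709, 710], [711, 711, 711],
                               rebin_percent=3)
print(fname)   # -> 'c_PLP0000708.dat'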
Code example #38
File: reduce.py    Project: llimeht/refnx
    def _reduce_single_angle(self, scale=1):
        """
        Reduce a single angle.
        """
        n_spectra = self.reflected_beam.n_spectra
        n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
        n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

        # calculate omega and two_theta depending on the mode.
        mode = self.reflected_beam.mode

        # we'll need the wavelengths to calculate Q.
        wavelengths = self.reflected_beam.m_lambda
        m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

        if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
            detector_z_difference = (self.reflected_beam.detector_z -
                                     self.direct_beam.detector_z)
            beampos_z_difference = (self.reflected_beam.m_beampos
                                    - self.direct_beam.m_beampos)

            total_z_deflection = (detector_z_difference
                                  + beampos_z_difference * Y_PIXEL_SPACING)

            # omega_nom.shape = (N, )
            omega_nom = np.degrees(np.arctan(total_z_deflection
                                   / self.reflected_beam.detector_y) / 2.)

            '''
            Wavelength-specific angle of incidence correction.
            This involves:
            1) working out the trajectory of the neutrons through the
               collimation system.
            2) finding where those neutrons intersect the sample.
            3) working out the elevation of the neutrons when they hit the
               sample.
            4) correcting the angle of incidence.
            '''
            speeds = general.wavelength_velocity(wavelengths)
            collimation_distance = self.reflected_beam.cat.collimation_distance
            s2_sample_distance = (self.reflected_beam.cat.sample_distance
                                  - self.reflected_beam.cat.slit2_distance)

            # work out the trajectories of the neutrons for them to pass
            # through the collimation system.
            trajectories = pm.find_trajectory(collimation_distance / 1000.,
                                              0, speeds)
            
            # work out where the beam hits the sample
            res = pm.parabola_line_intersection_point(s2_sample_distance / 1000,
                                                      0,
                                                      trajectories,
                                                      speeds,
                                                      omega_nom[:, np.newaxis])
            intersect_x, intersect_y, x_prime, elevation = res

            # correct the angle of incidence with a wavelength dependent
            # elevation.
            omega_corrected = omega_nom[:, np.newaxis] - elevation

        elif mode in ('SB', 'DB'):
            # single/double bounce: angle of incidence from the difference
            # between reflected and direct beam positions on the detector.
            omega = (self.reflected_beam.m_beampos
                     + self.reflected_beam.detector_z[:, np.newaxis])
            omega -= (self.direct_beam.m_beampos
                      + self.direct_beam.detector_z)
            omega /= 2 * self.reflected_beam.detector_y[:, np.newaxis]
            omega = np.arctan(omega)

            m_twotheta += (np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
                           * Y_PIXEL_SPACING)
            m_twotheta += self.reflected_beam.detector_z[:, np.newaxis, np.newaxis]
            m_twotheta -= (self.direct_beam.m_beampos[:, :, np.newaxis]
                           + self.direct_beam.detector_z)
            m_twotheta -= (self.reflected_beam.detector_y[:, np.newaxis, np.newaxis]
                           * np.tan(omega[:, :, np.newaxis]))

            m_twotheta /= self.reflected_beam.detector_y[:, np.newaxis, np.newaxis]
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta += omega[:, :, np.newaxis]

            # convert to degrees (matching the 'FOC' branch) and define
            # omega_corrected, which the Q calculation below relies on.
            omega_corrected = np.degrees(omega)
            m_twotheta = np.degrees(m_twotheta)

        '''
        --Specular Reflectivity--
        Use the (constant wavelength) spectra that have already been integrated
        over 2theta (in processnexus) to calculate the specular reflectivity.
        Beware: m_topandtail has already been divided through by the monitor
        counts and had its errors propagated (at the end of processnexus), so
        the 2theta pixels are correlated to some degree. If we used the 2D
        detector image to calculate the reflectivity
        (sum {Iref_{2theta, lambda}} / I_direct_{lambda}) the error bars in
        the reflectivity would turn out much larger than they should be.
        '''
        ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                                   self.reflected_beam.m_spec_sd,
                                   self.direct_beam.m_spec,
                                   self.direct_beam.m_spec_sd)

        # calculate the 1D Qz values.
        xdata = general.q(omega_corrected, wavelengths)
        xdata_sd = (self.reflected_beam.m_lambda_fwhm
                    / self.reflected_beam.m_lambda) ** 2
        xdata_sd += (self.reflected_beam.domega[:, np.newaxis]
                     / omega_corrected) ** 2
        xdata_sd = np.sqrt(xdata_sd) * xdata

        '''
        ---Offspecular reflectivity---
        Normalise the counts in the reflected beam by the direct beam
        spectrum; this gives a reflectivity. Also propagate the errors,
        leaving the fractional variance (dr/r)^2.
        --Note-- adjacent y-pixels (same wavelength) are correlated in this
        treatment because they share the same direct beam denominator, so you
        can't just sum them and propagate their errors as if independent,
        i.e. var(c_0/d + ... + c_n/d) != var(c_0/d) + ... + var(c_n/d).
        '''
        m_ref, m_ref_sd = EP.EPdiv(self.reflected_beam.m_topandtail,
                                   self.reflected_beam.m_topandtail_sd,
                                   self.direct_beam.m_spec[:, :, np.newaxis],
                                   self.direct_beam.m_spec_sd[:, :, np.newaxis])

        # there may have been divide-by-zeros; zero out the resulting infs.
        m_ref = np.where(np.isinf(m_ref), 0, m_ref)
        m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

        # calculate the Q values for the detector pixels. Each pixel has a
        # different 2theta and wavelength; ASSUME they share the same angle
        # of incidence.
        qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis],
                                m_twotheta,
                                0,
                                wavelengths[:, :, np.newaxis])

        reduction = {}
        reduction['xdata'] = self.xdata = xdata
        reduction['xdata_sd'] = self.xdata_sd = xdata_sd
        reduction['ydata'] = self.ydata = ydata
        reduction['ydata_sd'] = self.ydata_sd = ydata_sd
        reduction['m_ref'] = self.m_ref = m_ref
        reduction['m_ref_sd'] = self.m_ref_sd = m_ref_sd
        reduction['qz'] = self.m_qz = qz
        reduction['qy'] = self.m_qy = qy
        reduction['nspectra'] = self.n_spectra = n_spectra
        reduction['datafile_number'] = self.datafile_number = (
            self.reflected_beam.datafile_number)

        fnames = []
        if self.save:
            for i in range(n_spectra):
                data_tup = self.data(scanpoint=i)
                dataset = ReflectDataset(data_tup)
                fname = 'PLP{0:07d}_{1}.dat'.format(self.datafile_number, i)
                fnames.append(fname)
                with open(fname, 'wb') as f:
                    dataset.save(f)
                fname = 'PLP{0:07d}_{1}.xml'.format(self.datafile_number, i)
                with open(fname, 'wb') as f:
                    dataset.save_xml(f)

        reduction['fname'] = fnames
        return deepcopy(reduction)
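The Qz arithmetic at the end of `_reduce_single_angle` is Q = 4*pi*sin(omega)/lambda, with the fractional wavelength and angular resolutions combined in quadrature to give `xdata_sd`. A standalone sketch of that calculation (the helper name and the numbers are made up; like `general.q`, it takes the angle in degrees):

import numpy as np

def q_with_resolution(omega, d_omega, wavelength, d_wavelength):
    # omega in degrees; Q = 4 * pi * sin(omega) / wavelength
    q = 4 * np.pi * np.sin(np.radians(omega)) / wavelength
    # fractional variances add in quadrature:
    # (dq/q)^2 = (d_wavelength/wavelength)^2 + (d_omega/omega)^2
    dq = q * np.sqrt((d_wavelength / wavelength) ** 2
                     + (d_omega / omega) ** 2)
    return q, dq

# ~3% wavelength resolution combined with ~3% angular resolution
q, dq = q_with_resolution(0.65, 0.02, 8.0, 0.24)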