Example #1
def NIST_runner(dataset, method='least_squares', chi_atol=1e-5,
                val_rtol=1e-2, err_rtol=5e-3):
    NIST_dataset = ReadNistData(dataset)
    x, y = (NIST_dataset['x'], NIST_dataset['y'])

    if dataset == 'Nelson':
        y = np.log(y)

    params = NIST_dataset['start']

    fitfunc = NIST_Models[dataset][0]
    model = Model(params, fitfunc)
    objective = Objective(model, (x, y))
    fitter = CurveFitter(objective)
    result = fitter.fit(method=method)

    assert_allclose(objective.chisqr(),
                    NIST_dataset['sum_squares'],
                    atol=chi_atol)

    certval = NIST_dataset['cert_values']
    assert_allclose(result.x, certval, rtol=val_rtol)

    if 'stderr' in result:
        certerr = NIST_dataset['cert_stderr']
        assert_allclose(result.stderr, certerr, rtol=err_rtol)
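A minimal usage sketch, assuming ReadNistData and NIST_Models from the surrounding test module can resolve the dataset name:

# hypothetical call: check the fit against the certified 'Nelson' NIST StRD dataset
NIST_runner('Nelson', method='least_squares', chi_atol=1e-5)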
Example #2
def NIST_runner(
    dataset,
    method="least_squares",
    chi_atol=1e-5,
    val_rtol=1e-2,
    err_rtol=6e-3,
):
    NIST_dataset = ReadNistData(dataset)
    x, y = (NIST_dataset["x"], NIST_dataset["y"])

    if dataset == "Nelson":
        y = np.log(y)

    params = NIST_dataset["start"]

    fitfunc = NIST_Models[dataset][0]
    model = Model(params, fitfunc)
    objective = Objective(model, (x, y))
    fitter = CurveFitter(objective)
    result = fitter.fit(method=method)

    assert_allclose(objective.chisqr(),
                    NIST_dataset["sum_squares"],
                    atol=chi_atol)

    certval = NIST_dataset["cert_values"]
    assert_allclose(result.x, certval, rtol=val_rtol)

    if "stderr" in result:
        certerr = NIST_dataset["cert_stderr"]
        assert_allclose(result.stderr, certerr, rtol=err_rtol)
Example #3
    def test_parallel_objective(self):
        # check that a parallel objective works without issue
        # (it could be possible that parallel evaluation fails at a higher
        #  level in e.g. emcee or in scipy.optimize.differential_evolution)
        model = self.model361
        model.threads = 2

        objective = Objective(
            model,
            (self.qvals361, self.rvals361, self.evals361),
            transform=Transform("logY"),
        )
        p0 = np.array(objective.varying_parameters())
        cov = objective.covar()
        walkers = np.random.multivariate_normal(np.atleast_1d(p0),
                                                np.atleast_2d(cov),
                                                size=(100))
        map_logl = np.array(list(map(objective.logl, walkers)))
        map_chi2 = np.array(list(map(objective.chisqr, walkers)))

        wf = Wrapper_fn2(model.model, p0)
        map_mod = np.array(list(map(wf, walkers)))

        with MapWrapper(2) as g:
            mapw_mod = g(wf, walkers)
            mapw_logl = g(objective.logl, walkers)
            mapw_chi2 = g(objective.chisqr, walkers)
        assert_allclose(mapw_logl, map_logl)
        assert_allclose(mapw_chi2, map_chi2)
        assert_allclose(mapw_mod, map_mod)
    def setup_method(self):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        self.si = SLD(2.07, name='Si')
        self.sio2 = SLD(3.47, name='SiO2')
        self.d2o = SLD(6.36, name='d2o')
        self.h2o = SLD(-0.56, name='h2o')
        self.cm3 = SLD(3.5, name='cm3')
        self.polymer = SLD(2, name='polymer')

        self.sio2_l = self.sio2(40, 3)
        self.polymer_l = self.polymer(200, 3)

        self.structure = (self.si | self.sio2_l | self.polymer_l
                          | self.d2o(0, 3))

        fname = os.path.join(self.pth, 'c_PLP0011859_q.txt')

        self.dataset = ReflectDataset(fname)
        self.model = ReflectModel(self.structure, bkg=2e-7)
        self.objective = Objective(self.model,
                                   self.dataset,
                                   use_weights=False,
                                   transform=Transform('logY'))
        self.global_objective = GlobalObjective([self.objective])
Example #5
    def _update_analysis_objects(self):
        use_weights = self.use_weights.value == 'Yes'
        self.objective = Objective(self.model,
                                   self.dataset,
                                   transform=self.transform,
                                   use_weights=use_weights)
        self._curvefitter = None
Example #6
    def setup_method(self, tmpdir):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.tmpdir = tmpdir.strpath

        theoretical = np.loadtxt(os.path.join(self.path, "gauss_data.txt"))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        # these best weighted values and uncertainties obtained with Igor
        self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]

        self.best_weighted_errors = [
            0.0220313708486,
            1.12879436221,
            0.0447659158681,
            0.0412022938883,
        ]

        self.best_weighted_chisqr = 77.6040960351

        self.best_unweighted = [
            -0.10584111872702096,
            19.240347049328989,
            0.0092623066070940396,
            1.501362314145845,
        ]

        self.best_unweighted_errors = [
            0.34246565477,
            0.689820935208,
            0.0411243173041,
            0.0693429375282,
        ]

        self.best_unweighted_chisqr = 497.102084956

        self.p0 = np.array([0.1, 20.0, 0.1, 0.1])
        self.names = ["bkg", "A", "x0", "width"]
        self.bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]

        self.params = Parameters(name="gauss_params")
        for p, name, bound in zip(self.p0, self.names, self.bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            self.params.append(param)

        self.model = Model(self.params, fitfunc=gauss)
        self.data = Data1D((xvals, yvals, evals))
        self.objective = Objective(self.model, self.data)
        return 0
Example #7
def test_run():
    print("\n\n\n")
    from refnx.dataset import Data1D
    from refnx.dataset import ReflectDataset
    import refnx
    import data_in
    data = data_in.data_in('d2o/29553_54.dat')
    # dataset = data # ...
    data = Data1D(data)
    from make_egg import bsla_thesis
    #     air = SLD(0)
    #     air = air(0,0)
    bt = bsla_thesis()
    bt.interface_protein_solvent.setp(vary=True, bounds=(11, 40))
    bt.protein_length.setp(vary=True, bounds=(25, 55))
    bt.number_of_water_molecules.setp(vary=True, bounds=(1, 10000))
    bt.interface_width_air_solvent.setp(vary=True, bounds=(0.001, 30))
    #bt.interface_width_protein_solvent.setp(vary=True, bounds=(0, 5))
    bt.sld_of_protein.setp(vary=True, bounds=(1.92, 6.21))  # *(10**(-6))
    bt.d2o_to_h2o_ratio.setp(vary=True, bounds=(0, 1))
    #     if isinstance(bt, Component):
    #         print("it is comp")
    #     if isinstance(bt, Structure):
    #         print("it is")
    #print(bt.parameters)
    #     d2o = 1.9185/0.3
    #     h2o = -0.1635/0.3
    #     solvent = SLD((bt.d2o_to_h2o_ratio.value*d2o + (1-bt.d2o_to_h2o_ratio.value)*h2o))
    #     solvent = solvent(0,0)
    #     from refnx.reflect import Structure
    #     structure = air|bt|solvent
    #     structure.name = "bsla"
    from refnx.reflect import ReflectModel
    model = ReflectModel(bt)  #structure)
    from refnx.analysis import Transform, CurveFitter, Objective
    objective = Objective(model, data)
    fitter = CurveFitter(objective)
    fitter.fit('differential_evolution')
    import matplotlib.pyplot as plt
    #%matplotlib notebook
    #     plt.plot(*bt.sld_profile())
    objective.plot()
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel('Q')
    plt.ylabel('Reflectivity')
    plt.legend()
    print(bt)
    plt.show()
Example #8
def setup():
    # load the data.
    DATASET_NAME = os.path.join(refnx.__path__[0],
                                'analysis',
                                'test',
                                'c_PLP0011859_q.txt')

    # load the data
    data = ReflectDataset(DATASET_NAME)

    # the materials we're using
    si = SLD(2.07, name='Si')
    sio2 = SLD(3.47, name='SiO2')
    film = SLD(2, name='film')
    d2o = SLD(6.36, name='d2o')

    structure = si | sio2(30, 3) | film(250, 3) | d2o(0, 3)
    structure[1].thick.setp(vary=True, bounds=(15., 50.))
    structure[1].rough.setp(vary=True, bounds=(1., 6.))
    structure[2].thick.setp(vary=True, bounds=(200, 300))
    structure[2].sld.real.setp(vary=True, bounds=(0.1, 3))
    structure[2].rough.setp(vary=True, bounds=(1, 6))

    model = ReflectModel(structure, bkg=9e-6, scale=1.)
    model.bkg.setp(vary=True, bounds=(1e-8, 1e-5))
    model.scale.setp(vary=True, bounds=(0.9, 1.1))
    model.threads = 1
    # fit on a logR scale, but use weighting
    objective = Objective(model, data, transform=Transform('logY'),
                          use_weights=True)

    return objective
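A brief follow-on sketch, assuming the refnx imports used inside setup() (ReflectDataset, SLD, ReflectModel, Objective, Transform) are already in scope, showing one way the returned objective could be fitted:

from refnx.analysis import CurveFitter

objective = setup()
fitter = CurveFitter(objective)
# differential evolution respects the bounds set on the varying parameters
fitter.fit('differential_evolution')
print(objective.parameters)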
Example #9
    def setup(self):
        pth = os.path.dirname(os.path.abspath(refnx.reflect.__file__))
        e361 = RD(os.path.join(pth, 'test', 'e361r.txt'))

        sio2 = SLD(3.47, name='SiO2')
        si = SLD(2.07, name='Si')
        d2o = SLD(6.36, name='D2O')
        polymer = SLD(1, name='polymer')

        # e361 is an older dataset, but well characterised
        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        model361 = ReflectModel(structure361, bkg=2e-5)

        model361.scale.vary = True
        model361.bkg.vary = True
        model361.scale.range(0.1, 2)
        model361.bkg.range(0, 5e-5)
        model361.dq = 5.

        # d2o
        structure361[-1].sld.real.vary = True
        structure361[-1].sld.real.range(6, 6.36)

        structure361[1].thick.vary = True
        structure361[1].thick.range(5, 20)
        structure361[2].thick.vary = True
        structure361[2].thick.range(100, 220)

        structure361[2].sld.real.vary = True
        structure361[2].sld.real.range(0.2, 1.5)

        e361.x_err = None
        objective = Objective(model361, e361)
        self.fitter = CurveFitter(objective, nwalkers=200)
        self.fitter.initialise('jitter')
Example #10
    def setup(self):
        # Reproducible results!
        np.random.seed(123)

        m_true = -0.9594
        b_true = 4.294
        f_true = 0.534
        m_ls = -1.1040757010910947
        b_ls = 5.4405552502319505

        # Generate some synthetic data from the model.
        N = 50
        x = np.sort(10 * np.random.rand(N))
        y_err = 0.1 + 0.5 * np.random.rand(N)
        y = m_true * x + b_true
        y += np.abs(f_true * y) * np.random.randn(N)
        y += y_err * np.random.randn(N)

        data = Data1D(data=(x, y, y_err))

        p = Parameter(b_ls, 'b', vary=True, bounds=(-100, 100))
        p |= Parameter(m_ls, 'm', vary=True, bounds=(-100, 100))

        model = Model(p, fitfunc=line)
        self.objective = Objective(model, data)
        self.mcfitter = CurveFitter(self.objective)
        self.mcfitter_t = CurveFitter(self.objective, ntemps=20)

        self.mcfitter.initialise('prior')
        self.mcfitter_t.initialise('prior')
Example #11
    def test_code_fragment(self):
        e361 = ReflectDataset(os.path.join(self.pth, "e361r.txt"))

        si = SLD(2.07, name="Si")
        sio2 = SLD(3.47, name="SiO2")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(1, name="polymer")

        # e361 is an older dataset, but well characterised
        self.structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        self.model361 = ReflectModel(self.structure361, bkg=2e-5)

        self.model361.scale.vary = True
        self.model361.bkg.vary = True
        self.model361.scale.range(0.1, 2)
        self.model361.bkg.range(0, 5e-5)

        # d2o
        self.structure361[-1].sld.real.vary = True
        self.structure361[-1].sld.real.range(6, 6.36)

        self.structure361[1].thick.vary = True
        self.structure361[1].thick.range(5, 20)

        self.structure361[2].thick.vary = True
        self.structure361[2].thick.range(100, 220)

        self.structure361[2].sld.real.vary = True
        self.structure361[2].sld.real.range(0.2, 1.5)

        objective = Objective(self.model361, e361, transform=Transform("logY"))
        objective2 = eval(repr(objective))
        assert_allclose(objective2.chisqr(), objective.chisqr())

        exec(repr(objective))
        exec(code_fragment(objective))

        # artificially link the two thicknesses together
        # check that we can reproduce the objective from the repr
        self.structure361[2].thick.constraint = self.structure361[1].thick
        fragment = code_fragment(objective)
        fragment = fragment + "\nobj = objective()\nresult = obj.chisqr()"
        d = {}
        # need to provide the globals dictionary to exec, so it can see imports
        # e.g. https://bit.ly/2RFOF7i (from stackoverflow)
        exec(fragment, globals(), d)
        assert_allclose(d["result"], objective.chisqr())
Example #12
    def test_reflectivity_emcee(self):
        model = self.model361
        model.dq = 5.
        objective = Objective(model,
                              (self.qvals361, self.rvals361, self.evals361),
                              transform=Transform('logY'))
        fitter = CurveFitter(objective, nwalkers=100)

        assert_(len(objective.generative().shape) == 1)
        assert_(len(objective.residuals().shape) == 1)

        res = fitter.fit('least_squares')
        res_mcmc = fitter.sample(steps=5, nthin=10, random_state=1,
                                 verbose=False)

        mcmc_val = [mcmc_result.median for mcmc_result in res_mcmc]
        assert_allclose(mcmc_val, res.x, rtol=0.05)
Example #13
    def test_reflectivity_fit(self):
        # a smoke test to make sure the reflectivity fit proceeds
        model = self.model361
        objective = Objective(model,
                              (self.qvals361, self.rvals361, self.evals361),
                              transform=Transform('logY'))
        fitter = CurveFitter(objective)
        with np.errstate(invalid='raise'):
            fitter.fit('differential_evolution')
Example #14
    def setup_method(self):
        # Choose the "true" parameters.

        # Reproducible results!
        np.random.seed(123)

        self.m_true = -0.9594
        self.b_true = 4.294
        self.f_true = 0.534
        self.m_ls = -1.1040757010910947
        self.b_ls = 5.4405552502319505

        # Generate some synthetic data from the model.
        N = 50
        x = np.sort(10 * np.random.rand(N))
        y_err = 0.1 + 0.5 * np.random.rand(N)
        y = self.m_true * x + self.b_true
        y += np.abs(self.f_true * y) * np.random.randn(N)
        y += y_err * np.random.randn(N)

        self.data = Data1D(data=(x, y, y_err))

        self.p = Parameter(self.b_ls, 'b') | Parameter(self.m_ls, 'm')
        self.model = Model(self.p, fitfunc=line)
        self.objective = Objective(self.model, self.data)

        # want b and m
        self.p[0].vary = True
        self.p[1].vary = True

        mod = np.array([4.78166609, 4.42364699, 4.16404064, 3.50343504,
                        3.4257084, 2.93594347, 2.92035638, 2.67533842,
                        2.28136038, 2.19772983, 1.99295496, 1.93748334,
                        1.87484436, 1.65161016, 1.44613461, 1.11128101,
                        1.04584535, 0.86055984, 0.76913963, 0.73906649,
                        0.73331407, 0.68350418, 0.65216599, 0.59838566,
                        0.13070299, 0.10749131, -0.01010195, -0.10010155,
                        -0.29495372, -0.42817431, -0.43122391, -0.64637715,
                        -1.30560686, -1.32626428, -1.44835768, -1.52589881,
                        -1.56371158, -2.12048349, -2.24899179, -2.50292682,
                        -2.53576659, -2.55797996, -2.60870542, -2.7074727,
                        -3.93781479, -4.12415366, -4.42313742, -4.98368609,
                        -5.38782395, -5.44077086])
        self.mod = mod
Example #15
    def __init__(self, file_path, layers, predicted_slds, predicted_depths, xray):
        """Initialises the Model class by creating a refnx model with given predicted values.

        Args:
            file_path (string): a path to the file with the data to construct the model for.
            layers (int): the number of layers for the model predicted by the classifier.
            predicted_slds (ndarray): an array of predicted SLDs for each layer.
            predicted_depths (ndarray): an array of predicted depths for each layer.
            xray (Boolean): whether the model should use a neutron or x-ray probe.

        """
        self.structure = SLD(0, name='Air') #Model starts with air.

        if xray: #Use x-ray probe
            for i in range(layers):
                density = predicted_slds[i] / XRayGenerator.density_constant
                SLD_layer = MaterialSLD(XRayGenerator.material, density, probe='x-ray', wavelength=XRayGenerator.wavelength, name='Layer {}'.format(i+1))
                layer = SLD_layer(thick=predicted_depths[i], rough=Model.roughness)
                layer.density.setp(bounds=XRayGenerator.density_bounds, vary=True)
                layer.thick.setp(bounds=ImageGenerator.depth_bounds, vary=True)
                layer.rough.setp(bounds=Model.rough_bounds, vary=True)
                self.structure = self.structure | layer  #Next comes each layer.
            #Then substrate
            si_substrate = MaterialSLD(XRayGenerator.material, XRayGenerator.substrate_density, probe='x-ray', name='Si Substrate')(thick=0, rough=Model.roughness)

        else: #Use neutron probe
            for i in range(layers):
                layer = SLD(predicted_slds[i], name='Layer {}'.format(i+1))(thick=predicted_depths[i], rough=Model.roughness)
                layer.sld.real.setp(bounds=ImageGenerator.sld_neutron_bounds, vary=True)
                layer.thick.setp(bounds=ImageGenerator.depth_bounds, vary=True)
                layer.rough.setp(bounds=Model.rough_bounds, vary=True)
                self.structure = self.structure | layer  #Next comes each layer.
            #Then substrate
            si_substrate = SLD(Model.si_sld, name='Si Substrate')(thick=0, rough=Model.roughness)

        si_substrate.rough.setp(bounds=Model.rough_bounds, vary=True)
        self.structure = self.structure | si_substrate

        data = self.__load_data(file_path) #Pre-process and load given dataset.
        self.model = ReflectModel(self.structure, scale=Model.scale, dq=Model.dq, bkg=Model.bkg)
        self.objective = Objective(self.model, data)
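A hypothetical construction sketch; the class name Model is taken from the docstring above, while the file path and predicted arrays are illustrative placeholders only:

import numpy as np

# two predicted layers fitted against a neutron dataset; the values are made up
model = Model('data/sample_neutron.dat',
              layers=2,
              predicted_slds=np.array([2.5, 4.1]),
              predicted_depths=np.array([100.0, 60.0]),
              xray=False)
print(model.objective)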
Example #16
    def test_residuals(self):
        # weighted, with and without transform
        assert_almost_equal(self.objective.residuals(),
                            (self.data.y - self.mod) / self.data.y_err)

        objective = Objective(self.model, self.data,
                              transform=Transform('lin'))
        assert_almost_equal(objective.residuals(),
                            (self.data.y - self.mod) / self.data.y_err)

        # unweighted, with and without transform
        objective = Objective(self.model, self.data, use_weights=False)
        assert_almost_equal(objective.residuals(),
                            self.data.y - self.mod)

        objective = Objective(self.model, self.data, use_weights=False,
                              transform=Transform('lin'))
        assert_almost_equal(objective.residuals(),
                            self.data.y - self.mod)
Example #17
    def test_modelvals_degenerate_layers(self):
        # try fitting dataset with a deposited layer split into two degenerate
        # layers
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        dataset = ReflectDataset(fname)

        sio2 = SLD(3.47, name="SiO2")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(2.0, name="polymer")

        sio2_l = sio2(30, 3)
        polymer_l = polymer(125, 3)

        structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)

        polymer_l.thick.setp(value=125, vary=True, bounds=(0, 250))
        polymer_l.rough.setp(value=4, vary=True, bounds=(0, 8))
        structure[-1].rough.setp(vary=True, bounds=(0, 6))
        sio2_l.rough.setp(value=3.16, vary=True, bounds=(0, 8))

        model = ReflectModel(structure, bkg=2e-6)
        objective = Objective(model,
                              dataset,
                              use_weights=False,
                              transform=Transform("logY"))

        model.scale.setp(vary=True, bounds=(0, 2))
        model.bkg.setp(vary=True, bounds=(0, 8e-6))

        slabs = structure.slabs()
        assert_equal(slabs[2, 0:2], slabs[3, 0:2])
        assert_equal(slabs[2, 3], slabs[3, 3])
        assert_equal(slabs[1, 3], sio2_l.rough.value)

        f = CurveFitter(objective)
        f.fit(method="differential_evolution", seed=1, maxiter=3)

        slabs = structure.slabs()
        assert_equal(slabs[2, 0:2], slabs[3, 0:2])
        assert_equal(slabs[2, 3], slabs[3, 3])
Example #18
def make_model(names, bs, thicks, roughs, fig_i, data, show=False, mcmc=False):
    no_layers = len(bs)
    layers = []
    for i in range(no_layers):
        names.append('layer' + str(i))

    for i in range(no_layers):
        sld = SLD(bs[i], name=names[i])
        layers.append(sld(thicks[i], roughs[i]))

    # bound each layer's thickness and SLD around its own starting value
    layers[0].thick.setp(vary=True, bounds=(thicks[0] - 1, thicks[0] + 1))
    layers[0].sld.real.setp(vary=True, bounds=(bs[0] - 1, bs[0] + 1))

    for i, layer in enumerate(layers[1:], start=1):
        layer.thick.setp(vary=True, bounds=(thicks[i] - 1, thicks[i] + 1))
        layer.sld.real.setp(vary=True, bounds=(bs[i] - 1, bs[i] + 1))
        layer.rough.setp(vary=True, bounds=(0, 5))

    structure = layers[0]
    for layer in layers[1:]:
        structure |= layer
    print(structure)
    model = ReflectModel(structure, bkg=3e-6, dq=5.0)
    #model.scale.setp(bounds=(0.6, 1.2), vary=True)
    #model.bkg.setp(bounds=(1e-9, 9e-6), vary=True)
    objective = Objective(model, data, transform=Transform('logY'))
    fitter = CurveFitter(objective)
    if mcmc:
        fitter.sample(1000)
        process_chain(objective, fitter.chain, nburn=300, nthin=100)
    else:
        fitter.fit('differential_evolution')
    print(objective.parameters)

    if show:
        plt.figure(fig_i)
        plt.plot(*structure.sld_profile())
        plt.ylabel(r'SLD /$10^{-6} \AA^{-2}$')
        plt.xlabel(r'distance / $\AA$')
    return structure, fitter, objective, fig_i + 1
Example #19
    def test_resolution_speed_comparator(self):
        fname = os.path.join(self.pth, "c_PLP0011859_q.txt")
        dataset = ReflectDataset(fname)

        sio2 = SLD(3.47, name="SiO2")
        si = SLD(2.07, name="Si")
        d2o = SLD(6.36, name="D2O")
        polymer = SLD(2.0, name="polymer")

        sio2_l = sio2(30, 3)
        polymer_l = polymer(125, 3)

        dx = dataset.x_err
        structure = si | sio2_l | polymer_l | polymer_l | d2o(0, 3)
        model = ReflectModel(structure, bkg=2e-6, dq_type="constant")
        objective = Objective(model,
                              dataset,
                              use_weights=False,
                              transform=Transform("logY"))

        # check that choose_resolution_approach doesn't change state
        # of model
        fastest_method = choose_dq_type(objective)
        assert model.dq_type == "constant"
        assert_equal(dx, objective.data.x_err)

        # check that the comparison worked
        const_time = time.time()
        for i in range(1000):
            objective.generative()
        const_time = time.time() - const_time

        model.dq_type = "pointwise"
        point_time = time.time()
        for i in range(1000):
            objective.generative()
        point_time = time.time() - point_time

        if fastest_method == "pointwise":
            assert point_time < const_time
        elif fastest_method == "constant":
            assert const_time < point_time

        # check that we could use the function to setup a reflectmodel
        ReflectModel(structure, bkg=2e-6, dq_type=choose_dq_type(objective))
Example #20
def make_model(names, bs, thicks, roughs, fig_i, data, show=False, mcmc=False):
    # extent (float or Parameter): total extent of the spline region
    extent = sum(thicks[:, 0])
    # vs (sequence of float/Parameter): the real part of the SLD value at each knot
    vs = np.array(bs)[:, 0]
    # dz (sequence of float/Parameter): the lateral offset between successive knots
    dz = np.cumsum(np.array(thicks[:, 0]))
    print(dz)
    name = "number of knots " + str(len(names))  # (str) name of the component
    component = Spline(extent, vs, dz, name)
    front = SLD(0)
    front = front(0, 0)
    back = SLD(0)
    back = back(0, 0)
    structure = front | component | back
    model = ReflectModel(structure, bkg=3e-6, dq=5.0)
    objective = Objective(model, data, transform=Transform('logY'))
    fitter = CurveFitter(objective)
    fitter.fit('differential_evolution')
    return structure, fitter, objective, fig_i + 1
class TestGlobalFitting(object):
    def setup_method(self):
        self.pth = os.path.dirname(os.path.abspath(__file__))

        self.si = SLD(2.07, name='Si')
        self.sio2 = SLD(3.47, name='SiO2')
        self.d2o = SLD(6.36, name='d2o')
        self.h2o = SLD(-0.56, name='h2o')
        self.cm3 = SLD(3.5, name='cm3')
        self.polymer = SLD(2, name='polymer')

        self.sio2_l = self.sio2(40, 3)
        self.polymer_l = self.polymer(200, 3)

        self.structure = (self.si | self.sio2_l | self.polymer_l
                          | self.d2o(0, 3))

        fname = os.path.join(self.pth, 'c_PLP0011859_q.txt')

        self.dataset = ReflectDataset(fname)
        self.model = ReflectModel(self.structure, bkg=2e-7)
        self.objective = Objective(self.model,
                                   self.dataset,
                                   use_weights=False,
                                   transform=Transform('logY'))
        self.global_objective = GlobalObjective([self.objective])

    def test_residuals_length(self):
        # the residuals should be the same length as the data
        residuals = self.global_objective.residuals()
        assert_equal(residuals.size, len(self.dataset))

    def test_globalfitting(self):
        # smoke test for can the global fitting run?
        # also tests that global fitting gives same output as
        # normal fitting (for a single dataset)
        self.model.scale.setp(vary=True, bounds=(0.1, 2))
        self.model.bkg.setp(vary=True, bounds=(1e-10, 8e-6))
        self.structure[-1].rough.setp(vary=True, bounds=(0.2, 6))
        self.sio2_l.thick.setp(vary=True, bounds=(0.2, 80))
        self.polymer_l.thick.setp(bounds=(0.01, 400), vary=True)
        self.polymer_l.sld.real.setp(vary=True, bounds=(0.01, 4))

        self.objective.transform = Transform('logY')

        starting = np.array(self.objective.parameters)
        with np.errstate(invalid='raise'):
            g = CurveFitter(self.global_objective)
            res_g = g.fit()

            # need the same starting point
            self.objective.setp(starting)
            f = CurveFitter(self.objective)
            res_f = f.fit()

            # individual and global should give the same fit.
            assert_almost_equal(res_g.x, res_f.x)

    def test_multipledataset_corefinement(self):
        # test corefinement of three datasets
        data361 = ReflectDataset(os.path.join(self.pth, 'e361r.txt'))
        data365 = ReflectDataset(os.path.join(self.pth, 'e365r.txt'))
        data366 = ReflectDataset(os.path.join(self.pth, 'e366r.txt'))

        si = SLD(2.07, name='Si')
        sio2 = SLD(3.47, name='SiO2')
        d2o = SLD(6.36, name='d2o')
        h2o = SLD(-0.56, name='h2o')
        cm3 = SLD(3.47, name='cm3')
        polymer = SLD(1, name='polymer')

        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        structure365 = si | structure361[1] | structure361[2] | cm3(0, 3)
        structure366 = si | structure361[1] | structure361[2] | h2o(0, 3)

        structure365[-1].rough = structure361[-1].rough
        structure366[-1].rough = structure361[-1].rough

        structure361[1].thick.setp(vary=True, bounds=(0, 20))
        structure361[2].thick.setp(value=200., bounds=(200., 250.), vary=True)
        structure361[2].sld.real.setp(vary=True, bounds=(0, 2))
        structure361[2].vfsolv.setp(value=5., bounds=(0., 100.), vary=True)

        model361 = ReflectModel(structure361, bkg=2e-5)
        model365 = ReflectModel(structure365, bkg=2e-5)
        model366 = ReflectModel(structure366, bkg=2e-5)

        model361.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model365.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model366.bkg.setp(vary=True, bounds=(1e-6, 5e-5))

        objective361 = Objective(model361, data361)
        objective365 = Objective(model365, data365)
        objective366 = Objective(model366, data366)

        global_objective = GlobalObjective(
            [objective361, objective365, objective366])
        # are the right numbers of parameters varying?
        assert_equal(len(global_objective.varying_parameters()), 7)

        # can we set the parameters?
        global_objective.setp(np.array([1e-5, 10, 212, 1, 10, 1e-5, 1e-5]))

        f = CurveFitter(global_objective)
        f.fit()

        indiv_chisqr = np.sum(
            [objective.chisqr() for objective in global_objective.objectives])

        # the overall chi2 should be sum of individual chi2
        global_chisqr = global_objective.chisqr()
        assert_almost_equal(global_chisqr, indiv_chisqr)

        # now check that the parameters were held in common correctly.
        slabs361 = structure361.slabs()
        slabs365 = structure365.slabs()
        slabs366 = structure366.slabs()

        assert_equal(slabs365[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs366[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs365[-1, 3], slabs361[-1, 3])
        assert_equal(slabs366[-1, 3], slabs361[-1, 3])

        # check that the residuals are the correct lengths
        res361 = objective361.residuals()
        res365 = objective365.residuals()
        res366 = objective366.residuals()
        res_global = global_objective.residuals()
        assert_allclose(res_global[0:len(res361)], res361, rtol=1e-5)
        assert_allclose(res_global[len(res361):len(res361) + len(res365)],
                        res365,
                        rtol=1e-5)
        assert_allclose(res_global[len(res361) + len(res365):],
                        res366,
                        rtol=1e-5)

        repr(global_objective)
Example #22
class TestFitterGauss(object):
    # Test CurveFitter with a noisy gaussian, weighted and unweighted, to see
    # if the parameters and uncertainties come out correct

    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.tmpdir = tmpdir.strpath

        theoretical = np.loadtxt(os.path.join(self.path, "gauss_data.txt"))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        # these best weighted values and uncertainties obtained with Igor
        self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]

        self.best_weighted_errors = [
            0.0220313708486,
            1.12879436221,
            0.0447659158681,
            0.0412022938883,
        ]

        self.best_weighted_chisqr = 77.6040960351

        self.best_unweighted = [
            -0.10584111872702096,
            19.240347049328989,
            0.0092623066070940396,
            1.501362314145845,
        ]

        self.best_unweighted_errors = [
            0.34246565477,
            0.689820935208,
            0.0411243173041,
            0.0693429375282,
        ]

        self.best_unweighted_chisqr = 497.102084956

        self.p0 = np.array([0.1, 20.0, 0.1, 0.1])
        self.names = ["bkg", "A", "x0", "width"]
        self.bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]

        self.params = Parameters(name="gauss_params")
        for p, name, bound in zip(self.p0, self.names, self.bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            self.params.append(param)

        self.model = Model(self.params, fitfunc=gauss)
        self.data = Data1D((xvals, yvals, evals))
        self.objective = Objective(self.model, self.data)
        return 0

    def test_pickle(self):
        # tests if a CurveFitter can be pickled/unpickled.
        f = CurveFitter(self.objective)
        pkl = pickle.dumps(f)
        g = pickle.loads(pkl)
        g._check_vars_unchanged()

    def test_best_weighted(self):
        assert_equal(len(self.objective.varying_parameters()), 4)
        self.objective.setp(self.p0)

        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit("least_squares", jac="3-point")

        output = res.x
        assert_almost_equal(output, self.best_weighted, 3)
        assert_almost_equal(self.objective.chisqr(), self.best_weighted_chisqr,
                            5)

        # compare the residuals
        res = (self.data.y - self.model(self.data.x)) / self.data.y_err
        assert_equal(self.objective.residuals(), res)

        # compare objective.covar to the best_weighted_errors
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.005)

        # we're also going to try the checkpointing here.
        checkpoint = os.path.join(self.tmpdir, "checkpoint.txt")

        # compare samples to best_weighted_errors
        np.random.seed(1)
        f.sample(steps=201, random_state=1, verbose=False, f=checkpoint)
        process_chain(self.objective, f.chain, nburn=50, nthin=10)
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)

        # test that the checkpoint worked
        check_array = np.loadtxt(checkpoint)
        check_array = check_array.reshape(201, f._nwalkers, f.nvary)
        assert_allclose(check_array, f.chain)

        # test loading the checkpoint
        chain = load_chain(checkpoint)
        assert_allclose(chain, f.chain)

        f.initialise("jitter")
        f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
        assert_equal(f.chain.shape[0], 2)

        # we should be able to produce 2 * 100 steps from the generator
        g = self.objective.pgen(ngen=20000000000)
        s = [i for i, a in enumerate(g)]
        assert_equal(np.max(s), 200 - 1)
        g = self.objective.pgen(ngen=200)
        pvec = next(g)
        assert_equal(pvec.size, len(self.objective.parameters.flattened()))

        # check that all the parameters are returned via pgen, not only those
        # being varied.
        self.params[0].vary = False
        f = CurveFitter(self.objective, nwalkers=100)
        f.initialise("jitter")
        f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
        g = self.objective.pgen(ngen=100)
        pvec = next(g)
        assert_equal(pvec.size, len(self.objective.parameters.flattened()))

        # the following test won't work because of emcee/gh226.
        # chain = load_chain(checkpoint)
        # assert_(chain.shape == f.chain.shape)
        # assert_allclose(chain, f.chain)

        # try reproducing best fit with parallel tempering
        self.params[0].vary = True
        f = CurveFitter(self.objective, nwalkers=100, ntemps=10)
        f.fit("differential_evolution", seed=1)

        f.sample(steps=201, random_state=1, verbose=False)
        process_chain(self.objective, f.chain, nburn=50, nthin=15)
        print(self.params[0].chain.shape, self.params[0].chain)

        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)

    def test_best_unweighted(self):
        self.objective.weighted = False
        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit()

        output = res.x
        assert_almost_equal(self.objective.chisqr(),
                            self.best_unweighted_chisqr)
        assert_almost_equal(output, self.best_unweighted, 5)

        # compare the residuals
        res = self.data.y - self.model(self.data.x)
        assert_equal(self.objective.residuals(), res)

        # compare objective._covar to the best_unweighted_errors
        uncertainties = np.array([param.stderr for param in self.params])
        assert_almost_equal(uncertainties, self.best_unweighted_errors, 3)

        # the samples won't compare to the covariance matrix...
        # f.sample(nsteps=150, nburn=20, nthin=30, random_state=1)
        # uncertainties = [param.stderr for param in self.params]
        # assert_allclose(uncertainties, self.best_unweighted_errors,
        #                 rtol=0.15)

    def test_all_minimisers(self):
        """test minimisers against the Gaussian fit"""
        f = CurveFitter(self.objective)

        methods = ["differential_evolution", "L-BFGS-B", "least_squares"]
        if hasattr(sciopt, "shgo"):
            methods.append("shgo")
        if hasattr(sciopt, "dual_annealing"):
            methods.append("dual_annealing")

        for method in methods:
            self.objective.setp(self.p0)
            res = f.fit(method=method)
            assert_almost_equal(res.x, self.best_weighted, 3)

        # smoke test to check that we can use nlpost
        self.objective.setp(self.p0)
        logp0 = self.objective.logp()

        # check that probabilities are calculated correctly
        assert_allclose(
            self.objective.logpost(),
            self.objective.logp() + self.objective.logl(),
        )
        assert_allclose(self.objective.nlpost(), -self.objective.logpost())
        assert_allclose(self.objective.nlpost(self.p0),
                        -self.objective.logpost(self.p0))

        # if the priors are all uniform then the only difference between
        # logpost and logl is a constant. A minimiser should converge on the
        # same answer. The following tests examine that.
        # The test works for dual_annealing, but not for differential
        # evolution, not sure why that is.
        self.objective.setp(self.p0)
        res1 = f.fit(method="dual_annealing", seed=1)
        assert_almost_equal(res1.x, self.best_weighted, 3)
        nll1 = self.objective.nll()
        nlpost1 = self.objective.nlpost()

        self.objective.setp(self.p0)
        res2 = f.fit(method="dual_annealing", target="nlpost", seed=1)
        assert_almost_equal(res2.x, self.best_weighted, 3)
        nll2 = self.objective.nll()
        nlpost2 = self.objective.nlpost()

        assert_allclose(nlpost1, nlpost2, atol=0.001)
        assert_allclose(nll1, nll2, atol=0.001)

        # these two priors are calculated for different parameter values
        # (before and after the fit) they should be the same because all
        # the parameters have uniform priors.
        assert_almost_equal(self.objective.logp(), logp0)

    def test_pymc3_sample(self):
        # test sampling with pymc3
        try:
            import pymc3 as pm
            from refnx.analysis import pymc3_model
        except (ModuleNotFoundError, ImportError, AttributeError):
            # can't run test if pymc3/theano not installed
            return

        with pymc3_model(self.objective):
            s = pm.NUTS()
            pm.sample(
                200,
                tune=100,
                step=s,
                discard_tuned_samples=True,
                compute_convergence_checks=False,
                random_seed=1,
            )
Example #23
def getObjective(data, thicknesses, slds, layerNames, logpExtra=None):

    air = SLD(0, name="air layer")
    airSlab = air(10, 0)
    sio2 = SLD(10, name="bottom layer")
    sio2Slab = sio2(10, 0)

    #     print(" ... ",slds[0].startPoint)
    if len(layerNames) >= 1:
        i = 0
        sld1 = SLD(float(slds[i].startPoint), name=layerNames[i])
        sld1Slab = sld1(thicknesses[i].startPoint, thicknesses[i].roughness)
        sld1Slab.thick.setp(vary=thicknesses[i].vary,
                            bounds=(thicknesses[i].lower,
                                    thicknesses[i].upper))
        sld1Slab.sld.real.setp(vary=slds[i].vary,
                               bounds=(slds[i].lower, slds[i].upper))
    else:
        print(layerNames, " : variable 'layerNames' is empty")

    if len(layerNames) >= 2:
        i = 1
        sld2 = SLD(slds[i].startPoint, name=layerNames[i])
        sld2Slab = sld2(thicknesses[i].startPoint, thicknesses[i].roughness)
        sld2Slab.thick.setp(vary=thicknesses[i].vary,
                            bounds=(thicknesses[i].lower,
                                    thicknesses[i].upper))
        sld2Slab.sld.real.setp(vary=slds[i].vary,
                               bounds=(slds[i].lower, slds[i].upper))

    if len(layerNames) >= 3:
        i = 2
        sld3 = SLD(slds[i].startPoint, name=layerNames[i])
        sld3Slab = sld3(thicknesses[i].startPoint, thicknesses[i].roughness)
        sld3Slab.thick.setp(vary=thicknesses[i].vary,
                            bounds=(thicknesses[i].lower,
                                    thicknesses[i].upper))
        sld3Slab.sld.real.setp(vary=slds[i].vary,
                               bounds=(slds[i].lower, slds[i].upper))

    if len(layerNames) >= 4:
        i = 3
        sld4 = SLD(slds[i].startPoint, name=layerNames[i])
        sld4Slab = sld4(thicknesses[i].startPoint, thicknesses[i].roughness)
        sld4Slab.thick.setp(vary=thicknesses[i].vary,
                            bounds=(thicknesses[i].lower,
                                    thicknesses[i].upper))
        sld4Slab.sld.real.setp(vary=slds[i].vary,
                               bounds=(slds[i].lower, slds[i].upper))

    if len(layerNames) == 1:
        structure = airSlab | sld1Slab | sio2Slab
    if len(layerNames) == 2:
        structure = airSlab | sld1Slab | sld2Slab | sio2Slab
    if len(layerNames) == 3:
        structure = airSlab | sld1Slab | sld2Slab | sld3Slab | sio2Slab
    if len(layerNames) == 4:
        structure = airSlab | sld1Slab | sld2Slab | sld3Slab | sld4Slab | sio2Slab
    model = ReflectModel(structure, bkg=3e-6, dq=5.0)
    objective = Objective(model,
                          data,
                          transform=Transform('logY'),
                          logp_extra=logpExtra)
    return objective, structure
class Motofit(object):
    """
    An interactive slab modeller (Jupyter/ipywidgets based) for Neutron and
    X-ray reflectometry data.

    The interactive modeller is designed to be used in a Jupyter notebook.

    >>> # specify that plots are in a separate graph window
    >>> %matplotlib qt

    >>> # alternately if you want the graph to be embedded in the notebook use
    >>> # %matplotlib notebook

    >>> from refnx.reflect import Motofit
    >>> # create an instance of the modeller
    >>> app = Motofit()
    >>> # display it in the notebook by calling the object with a datafile.
    >>> app('dataset1.txt')
    >>> # let's fit a different dataset
    >>> app2 = Motofit()
    >>> app2('dataset2.txt')

    The `Motofit` instance has several useful attributes that can be used in
    other cells. For example, one can access the `objective` and `curvefitter`
    attributes for more advanced fitting functionality than is available in the
    GUI. A `code` attribute can be used to retrieve a Python code fragment that
    can be used as a basis for developing more complicated models, such as
    interparameter constraints, global fitting, etc.

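    >>> # a usage sketch, assuming a dataset has been loaded and a fit run:
    >>> obj = app.objective
    >>> fitter = app.curvefitter
    >>> print(app.code)  # Python code fragment for the current model
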
    Attributes
    ----------
    dataset: :class:`refnx.dataset.Data1D`
        The dataset associated with the modeller
    model: :class:`refnx.reflect.ReflectModel`
        Calculates a theoretical model, from an interfacial structure
        (`model.Structure`).
    objective: :class:`refnx.analysis.Objective`
        The Objective that allows one to compare the model against the data.
    fig: :class:`matplotlib.figure.Figure`
        Graph displaying the data.

    """
    def __init__(self):
        # attributes for the graph
        self.qmin = 0.005
        self.qmax = 0.5
        self.qpnt = 1000
        self.fig = None

        self.ax_data = None
        self.ax_residual = None
        self.ax_sld = None
        # gridspecs specify how the plots are laid out. Gridspec1 is when the
        # residuals plot is displayed. Gridspec2 is when it's not visible
        self._gridspec1 = gridspec.GridSpec(2,
                                            2,
                                            height_ratios=[5, 1],
                                            width_ratios=[1, 1],
                                            hspace=0.01)
        self._gridspec2 = gridspec.GridSpec(1, 2)

        self.theoretical_plot = None
        self.theoretical_plot_sld = None

        # attributes for a user dataset
        self.dataset = None
        self.objective = None
        self._curvefitter = None
        self.data_plot = None
        self.residuals_plot = None
        self.data_plot_sld = None

        self.dataset_name = widgets.Text(description="dataset:")
        self.dataset_name.disabled = True
        self.chisqr = widgets.FloatText(description="chi-squared:")
        self.chisqr.disabled = True

        # default slabs: fronting, a single layer, and the backing medium
        slab0 = Slab(0, 0, 0)
        slab1 = Slab(25, 3.47, 3)
        slab2 = Slab(0, 2.07, 3)

        structure = slab0 | slab1 | slab2
        rename_params(structure)
        self.model = ReflectModel(structure)

        # give some default parameter limits
        self.model.scale.bounds = (0.1, 2)
        self.model.bkg.bounds = (1e-8, 2e-5)
        self.model.dq.bounds = (0, 20)
        for slab in self.model.structure:
            slab.thick.bounds = (0, 2 * slab.thick.value)
            slab.sld.real.bounds = (0, 2 * slab.sld.real.value)
            slab.sld.imag.bounds = (0, 2 * slab.sld.imag.value)
            slab.rough.bounds = (0, 2 * slab.rough.value)

        # the main GUI widget
        self.display_box = widgets.VBox()

        self.tab = widgets.Tab()
        self.tab.set_title(0, "Model")
        self.tab.set_title(1, "Limits")
        self.tab.set_title(2, "Options")
        self.tab.observe(self._on_tab_changed, names="selected_index")

        # an output area for messages.
        self.output = widgets.Output()

        # options tab
        self.plot_type = widgets.Dropdown(
            options=["lin", "logY", "YX4", "YX2"],
            value="lin",
            description="Plot Type:",
            disabled=False,
        )
        self.plot_type.observe(self._on_plot_type_changed, names="value")
        self.use_weights = widgets.RadioButtons(
            options=["Yes", "No"],
            value="Yes",
            description="use dataset weights?",
            style={"description_width": "initial"},
        )
        self.use_weights.observe(self._on_use_weights_changed, names="value")
        self.transform = Transform("lin")
        self.display_residuals = widgets.Checkbox(
            value=False, description="Display residuals")
        self.display_residuals.observe(self._on_display_residuals_changed,
                                       names="value")

        self.model_view = None
        self.set_model(self.model)

    def save_model(self, *args, f=None):
        """
        Serialise a model to a pickle file.
        If `f` is not specified then the file name is constructed from the
        current dataset name; if there is no current dataset then the filename
        is constructed from the current time. These constructed filenames will
        be in the current working directory; for a specific save location, `f`
        must be provided.
        This method is only intended to be used to serialise models created by
        this interactive Jupyter widget modeller.

        Parameters
        ----------
        f: file like or str, optional
            File to save model to.
        """
        if f is None:
            f = "model_" + datetime.datetime.now().isoformat() + ".pkl"
            if self.dataset is not None:
                f = "model_" + self.dataset.name + ".pkl"

        with possibly_open_file(f) as g:
            pickle.dump(self.model, g)

    def load_model(self, *args, f=None):
        """
        Load a serialised model.
        If `f` is not specified then an attempt will be made to find a model
        corresponding to the current dataset name,
        `'model_' + self.dataset.name + '.pkl'`. If there is no current
        dataset then the most recent model will be loaded.
        This method is only intended to be used to deserialise models created
        by this interactive Jupyter widget modeller, and will not successfully
        load complicated ReflectModel instances created outside of the
        interactive modeller.

        Parameters
        ----------
        f: file like or str, optional
            pickle file to load model from.
        """
        if f is None and self.dataset is not None:
            # try and load the model corresponding to the current dataset
            f = "model_" + self.dataset.name + ".pkl"
        elif f is None:
            # load the most recent model file
            files = list(filter(os.path.isfile, glob.glob("model_*.pkl")))
            files.sort(key=lambda x: os.path.getmtime(x))
            files.reverse()
            if len(files):
                f = files[0]

        if f is None:
            self._print("No model file is specified/available.")
            return

        try:
            with possibly_open_file(f, "rb") as g:
                reflect_model = pickle.load(g)
            self.set_model(reflect_model)
        except (RuntimeError, FileNotFoundError) as exc:
            # RuntimeError if the file isn't a ReflectModel
            # FileNotFoundError if the specified file name wasn't found
            self._print(repr(exc), repr(f))

    def set_model(self, model):
        """
        Change the `refnx.reflect.ReflectModel` associated with the `Motofit`
        instance.

        Parameters
        ----------
        model: refnx.reflect.ReflectModel

        """
        if not isinstance(model, ReflectModel):
            raise RuntimeError("`model` was not an instance of ReflectModel")

        if self.model_view is not None:
            self.model_view.unobserve_all()

        # figure out if the reflect_model is a different instance. If it is
        # then the objective has to be updated.
        if model is not self.model:
            self.model = model
            self._update_analysis_objects()

        self.model = model

        self.model_view = ReflectModelView(self.model)
        self.model_view.observe(self.update_model, names=["view_changed"])
        self.model_view.observe(self.redraw, names=["view_redraw"])

        # observe when the number of varying parameters changed. This
        # invalidates a curvefitter, and a new one has to be produced.
        self.model_view.observe(self._on_num_varying_changed,
                                names=["num_varying"])

        self.model_view.do_fit_button.on_click(self.do_fit)
        self.model_view.to_code_button.on_click(self._to_code)
        self.model_view.save_model_button.on_click(self.save_model)
        self.model_view.load_model_button.on_click(self.load_model)

        self.redraw(None)

    def update_model(self, change):
        """
        Updates the plots when the parameters change

        Parameters
        ----------
        change
            the traitlets change notification (not used directly)

        """
        if not self.fig:
            return

        q = np.linspace(self.qmin, self.qmax, self.qpnt)
        theoretical = self.model.model(q)
        yt, _ = self.transform(q, theoretical)

        sld_profile = self.model.structure.sld_profile()
        z, sld = sld_profile
        if self.theoretical_plot is not None:
            self.theoretical_plot.set_data(q, yt)

            self.theoretical_plot_sld.set_data(z, sld)
            self.ax_sld.relim()
            self.ax_sld.autoscale_view()

        if self.dataset is not None:
            # if there's a dataset loaded then residuals_plot
            # should exist
            residuals = self.objective.residuals()
            self.chisqr.value = np.sum(residuals**2)

            self.residuals_plot.set_data(self.dataset.x, residuals)
            self.ax_residual.relim()
            self.ax_residual.autoscale_view()

        self.fig.canvas.draw()

    def _on_num_varying_changed(self, change):
        # observe when the number of varying parameters changed. This
        # invalidates a curvefitter, and a new one has to be produced.
        if change["new"] != change["old"]:
            self._curvefitter = None

    def _update_analysis_objects(self):
        use_weights = self.use_weights.value == "Yes"
        self.objective = Objective(
            self.model,
            self.dataset,
            transform=self.transform,
            use_weights=use_weights,
        )
        self._curvefitter = None

    def __call__(self, data=None, model=None):
        """
        Display the `Motofit` GUI in a Jupyter notebook cell.

        Parameters
        ----------
        data: refnx.dataset.Data1D
            The dataset to associate with the `Motofit` instance.

        model: refnx.reflect.ReflectModel or str or file-like
            A model to associate with the data.
            If `model` is a `str` or `file`-like then the `load_model` method
            will be used to try and load the model from file. This assumes that
            the file is a pickle of a `ReflectModel`
        """
        # the theoretical model
        # display the main graph
        import matplotlib.pyplot as plt

        self.fig = plt.figure(figsize=(9, 4))

        # grid specs depending on whether the residuals are displayed
        if self.display_residuals.value:
            d_gs = self._gridspec1[0, 0]
            sld_gs = self._gridspec1[:, 1]
        else:
            d_gs = self._gridspec2[0, 0]
            sld_gs = self._gridspec2[0, 1]

        self.ax_data = self.fig.add_subplot(d_gs)
        self.ax_data.set_xlabel(r"$Q/\AA^{-1}$")
        self.ax_data.set_ylabel("Reflectivity")

        self.ax_data.grid(True, color="b", linestyle="--", linewidth=0.1)

        self.ax_sld = self.fig.add_subplot(sld_gs)
        self.ax_sld.set_ylabel(r"$\rho/10^{-6}\AA^{-2}$")
        self.ax_sld.set_xlabel(r"$z/\AA$")

        self.ax_residual = self.fig.add_subplot(self._gridspec1[1, 0],
                                                sharex=self.ax_data)
        self.ax_residual.set_xlabel(r"$Q/\AA^{-1}$")
        self.ax_residual.grid(True, color="b", linestyle="--", linewidth=0.1)
        self.ax_residual.set_visible(self.display_residuals.value)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.fig.tight_layout()

        q = np.linspace(self.qmin, self.qmax, self.qpnt)
        theoretical = self.model.model(q)
        yt, _ = self.transform(q, theoretical)

        self.theoretical_plot = self.ax_data.plot(q, yt, zorder=2)[0]
        self.ax_data.set_yscale("log")

        z, sld = self.model.structure.sld_profile()
        self.theoretical_plot_sld = self.ax_sld.plot(z, sld)[0]

        # the figure has been reset, so remove ref to the data_plot,
        # residual_plot
        self.data_plot = None
        self.residuals_plot = None

        self.dataset = None
        if data is not None:
            self.load_data(data)

        if isinstance(model, ReflectModel):
            self.set_model(model)
            return self.display_box
        elif model is not None:
            self.load_model(model)
            return self.display_box

        self.redraw(None)
        return self.display_box

    def load_data(self, data):
        """
        Load a dataset into the `Motofit` instance.

        Parameters
        ----------
        data: refnx.dataset.Data1D, or str, or file-like
            The dataset, or a file that it can be loaded from.
        """
        if isinstance(data, ReflectDataset):
            self.dataset = data
        else:
            self.dataset = ReflectDataset(data)

        self.dataset_name.value = self.dataset.name

        # loading a dataset changes the objective and curvefitter
        self._update_analysis_objects()

        self.qmin = np.min(self.dataset.x)
        self.qmax = np.max(self.dataset.x)
        if self.fig is not None:
            yt, et = self.transform(self.dataset.x, self.dataset.y)

            if self.data_plot is None:
                (self.data_plot, ) = self.ax_data.plot(
                    self.dataset.x,
                    yt,
                    label=self.dataset.name,
                    ms=2,
                    marker="o",
                    ls="",
                    zorder=1,
                )
                self.data_plot.set_label(self.dataset.name)
                self.ax_data.legend()

                # no need to calculate residuals here, that'll be updated in
                # the redraw method
                (self.residuals_plot, ) = self.ax_residual.plot(self.dataset.x)
            else:
                self.data_plot.set_xdata(self.dataset.x)
                self.data_plot.set_ydata(yt)

            # calculate theoretical model over same range as data
            # use redraw over update_model because it ensures chi2 widget gets
            # displayed
            self.redraw(None)
            self.ax_data.relim()
            self.ax_data.autoscale_view()
            self.ax_residual.relim()
            self.ax_residual.autoscale_view()
            self.fig.canvas.draw()

    def redraw(self, change):
        """
        Redraw the Jupyter GUI associated with the `Motofit` instance.
        """
        self._update_display_box(self.display_box)
        self.update_model(None)

    @property
    def curvefitter(self):
        """
        class:`CurveFitter` : Object for fitting the data based on the
        objective.
        """
        if self.objective is not None and self._curvefitter is None:
            self._curvefitter = CurveFitter(self.objective)

        return self._curvefitter

    def _print(self, string):
        """
        Print to the output widget
        """
        from IPython.display import clear_output

        with self.output:
            clear_output()
            print(string)

    def do_fit(self, *args):
        """
        Ask the Motofit object to perform a fit (differential evolution).

        Parameters
        ----------
        *args
            Ignored. Accepted so that the method can be wired up directly as
            a widget callback.

        Notes
        -----
        After performing the fit the Jupyter display is updated.
        """
        if self.dataset is None:
            return

        if not self.model.parameters.varying_parameters():
            self._print("No parameters are being varied")
            return

        try:
            logp = self.objective.logp()
            if not np.isfinite(logp):
                self._print("One of your parameter values lies outside its"
                            " bounds. Please adjust the value, or the bounds.")
                return
        except ZeroDivisionError:
            self._print("One parameter has equal lower and upper bounds."
                        " Either alter the bounds, or don't let that"
                        " parameter vary.")
            return

        def callback(xk, convergence):
            self.chisqr.value = self.objective.chisqr(xk)

        self.curvefitter.fit("differential_evolution", callback=callback)

        # need to update the widgets as the model will be updated.
        # this also redraws GUI.
        # self.model_view.refresh()
        self.set_model(self.model)

        self._print(str(self.objective))

    def _to_code(self, change=None):
        self._print(self.code)

    @property
    def code(self):
        """
        str : A Python code fragment capable of fitting the data.
        Executable Python code fragment for the GUI model.
        """
        if self.objective is None:
            self._update_analysis_objects()

        return to_code(self.objective)

    def _on_tab_changed(self, change):
        pass

    def _on_plot_type_changed(self, change):
        """
        The user would like to plot and fit as logR/linR/RQ4/RQ2, etc.
        """
        self.transform = Transform(change["new"])
        if self.objective is not None:
            self.objective.transform = self.transform

        if self.dataset is not None:
            yt, _ = self.transform(self.dataset.x, self.dataset.y)

            self.data_plot.set_xdata(self.dataset.x)
            self.data_plot.set_ydata(yt)

        self.update_model(None)

        # probably have to change LHS axis of the data plot when
        # going between different plot types.
        if change["new"] == "logY":
            self.ax_data.set_yscale("linear")
        else:
            self.ax_data.set_yscale("log")

        self.ax_data.relim()
        self.ax_data.autoscale_view()
        self.fig.canvas.draw()

    def _on_use_weights_changed(self, change):
        self._update_analysis_objects()
        self.update_model(None)

    def _on_display_residuals_changed(self, change):
        import matplotlib.pyplot as plt

        if change["new"]:
            self.ax_residual.set_visible(True)
            self.ax_data.set_position(self._gridspec1[0, 0].get_position(
                self.fig))
            self.ax_sld.set_position(self._gridspec1[:,
                                                     1].get_position(self.fig))
            plt.setp(self.ax_data.get_xticklabels(), visible=False)
        else:
            self.ax_residual.set_visible(False)
            self.ax_data.set_position(self._gridspec2[:, 0].get_position(
                self.fig))
            self.ax_sld.set_position(self._gridspec2[:,
                                                     1].get_position(self.fig))
            plt.setp(self.ax_data.get_xticklabels(), visible=True)

    @property
    def _options_box(self):
        return widgets.VBox(
            [self.plot_type, self.use_weights, self.display_residuals])

    def _update_display_box(self, box):
        """
        Redraw the Jupyter GUI associated with the `Motofit` instance
        """
        vbox_widgets = []

        if self.dataset is not None:
            vbox_widgets.append(widgets.HBox([self.dataset_name, self.chisqr]))

        self.tab.children = [
            self.model_view.model_box,
            self.model_view.limits_box,
            self._options_box,
        ]

        vbox_widgets.append(self.tab)
        vbox_widgets.append(self.output)
        box.children = tuple(vbox_widgets)
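A minimal usage sketch for the widget above, assuming it is importable as
`refnx.reflect.Motofit` and that "example.dat" is a (hypothetical) reduced
reflectivity file:

from refnx.dataset import ReflectDataset
from refnx.reflect import SLD, ReflectModel, Motofit

data = ReflectDataset("example.dat")

# a simple substrate / oxide / polymer / solvent model to start from
si = SLD(2.07, name="Si")
sio2 = SLD(3.47, name="SiO2")
polymer = SLD(2.0, name="polymer")
d2o = SLD(6.36, name="d2o")
structure = si | sio2(40, 3) | polymer(200, 3) | d2o(0, 3)
model = ReflectModel(structure, bkg=1e-6)

app = Motofit()
# calling the instance in a notebook cell builds the figure and returns the
# ipywidgets display box; data and model can also be loaded later via the GUI
app(data, model=model)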
Example #25
0
class TestCurveFitter(object):
    def setup_method(self):
        # Reproducible results!
        np.random.seed(123)

        self.m_true = -0.9594
        self.b_true = 4.294
        self.f_true = 0.534
        self.m_ls = -1.1040757010910947
        self.b_ls = 5.4405552502319505

        # Generate some synthetic data from the model.
        N = 50
        x = np.sort(10 * np.random.rand(N))
        y_err = 0.1 + 0.5 * np.random.rand(N)
        y = self.m_true * x + self.b_true
        y += np.abs(self.f_true * y) * np.random.randn(N)
        y += y_err * np.random.randn(N)

        self.data = Data1D(data=(x, y, y_err))

        self.p = Parameter(self.b_ls, "b", vary=True, bounds=(-100, 100))
        self.p |= Parameter(self.m_ls, "m", vary=True, bounds=(-100, 100))

        self.model = Model(self.p, fitfunc=line)
        self.objective = Objective(self.model, self.data)
        assert_(len(self.objective.varying_parameters()) == 2)

        mod = np.array([
            4.78166609,
            4.42364699,
            4.16404064,
            3.50343504,
            3.4257084,
            2.93594347,
            2.92035638,
            2.67533842,
            2.28136038,
            2.19772983,
            1.99295496,
            1.93748334,
            1.87484436,
            1.65161016,
            1.44613461,
            1.11128101,
            1.04584535,
            0.86055984,
            0.76913963,
            0.73906649,
            0.73331407,
            0.68350418,
            0.65216599,
            0.59838566,
            0.13070299,
            0.10749131,
            -0.01010195,
            -0.10010155,
            -0.29495372,
            -0.42817431,
            -0.43122391,
            -0.64637715,
            -1.30560686,
            -1.32626428,
            -1.44835768,
            -1.52589881,
            -1.56371158,
            -2.12048349,
            -2.24899179,
            -2.50292682,
            -2.53576659,
            -2.55797996,
            -2.60870542,
            -2.7074727,
            -3.93781479,
            -4.12415366,
            -4.42313742,
            -4.98368609,
            -5.38782395,
            -5.44077086,
        ])
        self.mod = mod

        self.mcfitter = CurveFitter(self.objective)

    def test_bounds_list(self):
        bnds = bounds_list(self.p)
        assert_allclose(bnds, [(-100, 100), (-100, 100)])

        # try making a Parameter bound a normal distribution, then get an
        # approximation to box bounds
        self.p[0].bounds = PDF(norm(0, 1))
        assert_allclose(bounds_list(self.p),
                        [norm(0, 1).ppf([0.005, 0.995]), (-100, 100)])

    def test_constraints(self):
        # constraints should work during fitting
        self.p[0].value = 5.4

        self.p[1].constraint = -0.203 * self.p[0]
        assert_equal(self.p[1].value, self.p[0].value * -0.203)
        res = self.mcfitter.fit()

        assert_(res.success)
        assert_equal(len(self.objective.varying_parameters()), 1)

        # the flattened parameter list is [b, m]
        assert_(self.p[0] is self.objective.parameters.flattened()[0])
        assert_(self.p[1] is self.objective.parameters.flattened()[1])
        assert_almost_equal(self.p[0].value, res.x[0])
        assert_almost_equal(self.p[1].value, self.p[0].value * -0.203)

        # check that constraints work during sampling
        # the CurveFitter has to be set up again if you change how the
        # parameters are being fitted.
        mcfitter = CurveFitter(self.objective)
        assert_(mcfitter.nvary == 1)
        mcfitter.sample(5)
        assert_equal(self.p[1].value, self.p[0].value * -0.203)
        # the constrained parameters should have a chain
        assert_(self.p[0].chain is not None)
        assert_(self.p[1].chain is not None)
        assert_allclose(self.p[1].chain, self.p[0].chain * -0.203)

    def test_mcmc(self):
        self.mcfitter.sample(steps=50, nthin=1, verbose=False)

        assert_equal(self.mcfitter.nvary, 2)

        # smoke test for corner plot
        self.mcfitter.objective.corner()

        # we're not doing Parallel Tempering here.
        assert_(self.mcfitter._ntemps == -1)
        assert_(isinstance(self.mcfitter.sampler, emcee.EnsembleSampler))

        # should be able to multithread
        mcfitter = CurveFitter(self.objective, nwalkers=50)
        res = mcfitter.sample(steps=33, nthin=2, verbose=False, pool=2)

        # check that the autocorrelation function at least runs
        acfs = mcfitter.acf(nburn=10)
        assert_equal(acfs.shape[-1], mcfitter.nvary)

        # check the standalone autocorrelation calculator
        acfs2 = autocorrelation_chain(mcfitter.chain, nburn=10)
        assert_equal(acfs, acfs2)

        # check integrated_time
        integrated_time(acfs2, tol=5)

        # check chain shape
        assert_equal(mcfitter.chain.shape, (33, 50, 2))
        # assert_equal(mcfitter._lastpos, mcfitter.chain[:, -1, :])
        assert_equal(res[0].chain.shape, (33, 50))

        # if the number of parameters changes there should be an Exception
        # raised
        from pytest import raises

        with raises(RuntimeError):
            self.p[0].vary = False
            self.mcfitter.sample(1)

        # can fix by making the sampler again
        self.mcfitter.make_sampler()
        self.mcfitter.sample(1)

    def test_random_seed(self):
        # check that MCMC sampling is reproducible
        self.mcfitter.sample(steps=2, random_state=1)

        # get a starting pos
        starting_pos = self.mcfitter._state.coords

        # is sampling reproducible
        self.mcfitter.reset()
        self.mcfitter.initialise(pos=starting_pos)
        self.mcfitter.sample(3, random_state=1, pool=1)
        chain1 = np.copy(self.mcfitter.chain)

        self.mcfitter.reset()
        self.mcfitter.initialise(pos=starting_pos)
        self.mcfitter.sample(3, random_state=1, pool=1)
        chain2 = np.copy(self.mcfitter.chain)

        assert_equal(chain1, chain2)

    def test_mcmc_pt(self):
        # smoke test for parallel tempering
        x = np.array(self.objective.parameters)

        mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
        assert_equal(mcfitter.sampler.ntemps, 10)

        # check that the parallel sampling works
        # and that chain shape is correct
        res = mcfitter.sample(steps=5, nthin=2, verbose=False, pool=-1)
        assert_equal(mcfitter.chain.shape, (5, 10, 50, 2))
        assert_equal(res[0].chain.shape, (5, 50))
        assert_equal(mcfitter.chain[:, 0, :, 0], res[0].chain)
        assert_equal(mcfitter.chain[:, 0, :, 1], res[1].chain)
        chain = np.copy(mcfitter.chain)

        # the sampler should store the probability
        assert_equal(mcfitter.logpost.shape, (5, 10, 50))
        assert_allclose(mcfitter.logpost, mcfitter.sampler._ptchain.logP)

        logprobs = mcfitter.logpost
        highest_prob_loc = np.argmax(logprobs[:, 0])
        idx = np.unravel_index(highest_prob_loc, logprobs[:, 0].shape)
        idx = list(idx)
        idx.insert(1, 0)
        idx = tuple(idx)
        assert_equal(idx, mcfitter.index_max_prob)
        pvals = mcfitter.chain[idx]
        assert_allclose(logprobs[idx], self.objective.logpost(pvals))

        # try resetting the chain
        mcfitter.reset()

        # test for reproducible operation
        self.objective.setp(x)
        mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
        mcfitter.initialise("jitter", random_state=1)
        mcfitter.sample(steps=5, nthin=2, verbose=False, random_state=2)
        chain = np.copy(mcfitter.chain)

        self.objective.setp(x)
        mcfitter = CurveFitter(self.objective, ntemps=10, nwalkers=50)
        mcfitter.initialise("jitter", random_state=1)
        mcfitter.sample(steps=5, nthin=2, verbose=False, random_state=2)
        chain2 = np.copy(mcfitter.chain)

        assert_allclose(chain2, chain)

    def test_mcmc_init(self):
        # smoke test for sampler initialisation
        # TODO check that the initialisation worked.
        # initialisation should be reproducible when a random_state is given
        self.mcfitter.initialise("prior", random_state=1)
        starting_pos = np.copy(self.mcfitter._state.coords)

        self.mcfitter.initialise("prior", random_state=1)
        starting_pos2 = self.mcfitter._state.coords
        assert_equal(starting_pos, starting_pos2)

        self.mcfitter.initialise("jitter", random_state=1)
        starting_pos = np.copy(self.mcfitter._state.coords)

        self.mcfitter.initialise("jitter", random_state=1)
        starting_pos2 = self.mcfitter._state.coords
        assert_equal(starting_pos, starting_pos2)

        mcfitter = CurveFitter(self.objective, nwalkers=100)
        mcfitter.initialise("covar")
        assert_equal(mcfitter._state.coords.shape, (100, 2))
        mcfitter.initialise("prior")
        assert_equal(mcfitter._state.coords.shape, (100, 2))
        mcfitter.initialise("jitter")
        assert_equal(mcfitter._state.coords.shape, (100, 2))
        # initialise with last position
        mcfitter.sample(steps=1)
        chain = mcfitter.chain
        mcfitter.initialise(pos=chain[-1])
        assert_equal(mcfitter._state.coords.shape, (100, 2))
        # initialise with chain
        mcfitter.sample(steps=2)
        chain = mcfitter.chain
        mcfitter.initialise(pos=chain)
        assert_equal(mcfitter._state.coords, chain[-1])
        # initialise with chain if it's never been run before
        mcfitter = CurveFitter(self.objective, nwalkers=100)
        mcfitter.initialise(chain)

        # initialise for Parallel tempering
        mcfitter = CurveFitter(self.objective, ntemps=20, nwalkers=100)
        mcfitter.initialise("covar")
        assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
        mcfitter.initialise("prior")
        assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
        mcfitter.initialise("jitter")
        assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
        # initialise with last position
        mcfitter.sample(steps=1)
        chain = mcfitter.chain
        mcfitter.initialise(pos=chain[-1])
        assert_equal(mcfitter._state.coords.shape, (20, 100, 2))
        # initialise with chain
        mcfitter.sample(steps=2)
        chain = mcfitter.chain
        mcfitter.initialise(pos=np.copy(chain))
        assert_equal(mcfitter._state.coords, chain[-1])
        # initialise with chain if it's never been run before
        mcfitter = CurveFitter(self.objective, nwalkers=100, ntemps=20)
        mcfitter.initialise(chain)

    def test_fit_smoke(self):
        # smoke tests to check that fit runs
        def callback(xk):
            return

        def callback2(xk, **kws):
            return

        # L-BFGS-B
        res0 = self.mcfitter.fit(callback=callback)
        assert_almost_equal(res0.x, [self.b_ls, self.m_ls], 6)
        res0 = self.mcfitter.fit()
        res0 = self.mcfitter.fit(verbose=False)
        res0 = self.mcfitter.fit(verbose=False, callback=callback)

        # least_squares
        res1 = self.mcfitter.fit(method="least_squares")
        assert_almost_equal(res1.x, [self.b_ls, self.m_ls], 6)

        # least_squares doesn't accept a callback. As well as testing that
        # least_squares works, it checks that providing a callback doesn't
        # trip the fitter up.
        res1 = self.mcfitter.fit(method="least_squares", callback=callback)
        assert_almost_equal(res1.x, [self.b_ls, self.m_ls], 6)

        # need full bounds for differential_evolution
        self.p[0].range(3, 7)
        self.p[1].range(-2, 0)
        res2 = self.mcfitter.fit(
            method="differential_evolution",
            seed=1,
            popsize=10,
            maxiter=100,
            callback=callback2,
        )
        assert_almost_equal(res2.x, [self.b_ls, self.m_ls], 6)

        # check that the res object has covar and stderr
        assert_("covar" in res0)
        assert_("stderr" in res0)

    def test_NIST(self):
        # Run all the NIST standard tests with leastsq
        for model in NIST_Models:
            try:
                NIST_runner(model)
            except Exception:
                print(model)
                raise

    def test_multipledataset_corefinement(self):
        # test corefinement of three datasets
        data361 = ReflectDataset(os.path.join(self.pth, 'e361r.txt'))
        data365 = ReflectDataset(os.path.join(self.pth, 'e365r.txt'))
        data366 = ReflectDataset(os.path.join(self.pth, 'e366r.txt'))

        si = SLD(2.07, name='Si')
        sio2 = SLD(3.47, name='SiO2')
        d2o = SLD(6.36, name='d2o')
        h2o = SLD(-0.56, name='h2o')
        cm3 = SLD(3.47, name='cm3')
        polymer = SLD(1, name='polymer')

        structure361 = si | sio2(10, 4) | polymer(200, 3) | d2o(0, 3)
        structure365 = si | structure361[1] | structure361[2] | cm3(0, 3)
        structure366 = si | structure361[1] | structure361[2] | h2o(0, 3)

        structure365[-1].rough = structure361[-1].rough
        structure366[-1].rough = structure361[-1].rough

        structure361[1].thick.setp(vary=True, bounds=(0, 20))
        structure361[2].thick.setp(value=200., bounds=(200., 250.), vary=True)
        structure361[2].sld.real.setp(vary=True, bounds=(0, 2))
        structure361[2].vfsolv.setp(value=5., bounds=(0., 100.), vary=True)

        model361 = ReflectModel(structure361, bkg=2e-5)
        model365 = ReflectModel(structure365, bkg=2e-5)
        model366 = ReflectModel(structure366, bkg=2e-5)

        model361.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model365.bkg.setp(vary=True, bounds=(1e-6, 5e-5))
        model366.bkg.setp(vary=True, bounds=(1e-6, 5e-5))

        objective361 = Objective(model361, data361)
        objective365 = Objective(model365, data365)
        objective366 = Objective(model366, data366)

        global_objective = GlobalObjective(
            [objective361, objective365, objective366])
        # are the right numbers of parameters varying?
        assert_equal(len(global_objective.varying_parameters()), 7)

        # can we set the parameters?
        global_objective.setp(np.array([1e-5, 10, 212, 1, 10, 1e-5, 1e-5]))

        f = CurveFitter(global_objective)
        f.fit()

        indiv_chisqr = np.sum(
            [objective.chisqr() for objective in global_objective.objectives])

        # the overall chi2 should be sum of individual chi2
        global_chisqr = global_objective.chisqr()
        assert_almost_equal(global_chisqr, indiv_chisqr)

        # now check that the parameters were held in common correctly.
        slabs361 = structure361.slabs()
        slabs365 = structure365.slabs()
        slabs366 = structure366.slabs()

        assert_equal(slabs365[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs366[0:2, 0:5], slabs361[0:2, 0:5])
        assert_equal(slabs365[-1, 3], slabs361[-1, 3])
        assert_equal(slabs366[-1, 3], slabs361[-1, 3])

        # check that the residuals are the correct lengths
        res361 = objective361.residuals()
        res365 = objective365.residuals()
        res366 = objective366.residuals()
        res_global = global_objective.residuals()
        assert_allclose(res_global[0:len(res361)], res361, rtol=1e-5)
        assert_allclose(res_global[len(res361):len(res361) + len(res365)],
                        res365,
                        rtol=1e-5)
        assert_allclose(res_global[len(res361) + len(res365):],
                        res366,
                        rtol=1e-5)

        repr(global_objective)
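A short sketch (not part of the test suite) of the fit-then-sample workflow
exercised above; `global_objective` stands for any Objective or
GlobalObjective that has already been constructed:

from refnx.analysis import CurveFitter, process_chain

fitter = CurveFitter(global_objective, nwalkers=100)

# point estimate first; differential evolution needs finite bounds on every
# varying parameter
fitter.fit("differential_evolution")

# then MCMC around that solution
fitter.initialise("jitter")
fitter.sample(400, nthin=10, pool=-1)

# discard burn-in when processing; each varying Parameter picks up a .chain
# attribute so uncertainties can be reported
process_chain(global_objective, fitter.chain, nburn=100)
print(global_objective)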
Example #27
0
    n = l.reshape(timesteps, sim.layers.shape[1]-layers_to_cut, sim.layers.shape[2])

    data_dir = '../data/reflectometry2/dspc_{}/'.format(surface_pressure)
    dataset = ReflectDataset(os.path.join(data_dir, '{}{}.dat'.format(contrast, surface_pressure)))

    refy = np.zeros((n.shape[0], dataset.x.size))
    sldy = []
    chi = np.zeros((n.shape[0]))
    print(n.shape[0])
    for i in range(n.shape[0]):
        sim.av_layers = n[i, :, :]
        model = ReflectModel(sim)
        model.scale.setp(1, vary=True, bounds=(0.00000001, np.inf))
        model.bkg.setp(dataset.y[-1], vary=False)
        objective = Objective(model, dataset, transform=Transform('YX4'))
        fitter = CurveFitter(objective)
        res = fitter.fit()
        refy[i] = model(dataset.x, x_err=dataset.x_err)*(dataset.x)**4
        sldy.append(sim.sld_profile()[1])
        chi[i] = objective.chisqr()
        all_chi = np.append(all_chi, objective.chisqr())
        if i == 0:
            ax1.errorbar(dataset.x,
                         dataset.y*(dataset.x)**4 * 10**(ci-1),
                         yerr=dataset.y_err*(
                             dataset.x)**4 * 10**(ci-1),
                         linestyle='', marker='o',
                         color=sns.color_palette()[ci])
        if i % 5 == 0:
            ax1.plot(dataset.x,
Example #28
0
d2o = SLD(6.36, name='d2o')

structure = si | sio2(30, 3) | film(250, 3) | d2o(0, 3)
structure[1].thick.setp(vary=True, bounds=(15., 50.))
structure[1].rough.setp(vary=True, bounds=(1., 6.))
structure[2].thick.setp(vary=True, bounds=(200, 300))
structure[2].sld.real.setp(vary=True, bounds=(0.1, 3))
structure[2].rough.setp(vary=True, bounds=(1, 6))

model = ReflectModel(structure, bkg=9e-6, scale=1.)
model.bkg.setp(vary=True, bounds=(1e-8, 1e-5))
model.scale.setp(vary=True, bounds=(0.9, 1.1))

# fit on a logR scale, but use weighting
objective = Objective(model,
                      data,
                      transform=Transform('logY'),
                      use_weights=True)

# create the fit instance
fitter = CurveFitter(objective)

# do the fit
res = fitter.fit(method='differential_evolution')

# see the fit results
print(objective)

fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
ax.scatter(data.x, data.y, label=DATASET_NAME)
ax.semilogy()
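One way the figure begun above might be finished (a sketch only; `fig`, `ax`,
`data`, `model`, `objective` and `DATASET_NAME` are the objects used in this
example):

# overlay the fitted reflectivity on the data
ax.plot(data.x, model(data.x), color="r", zorder=20, label="fit")
ax.set_xlabel(r"$Q/\AA^{-1}$")
ax.set_ylabel("Reflectivity")
ax.legend()

# residuals panel underneath
ax2 = fig.add_subplot(2, 1, 2, sharex=ax)
ax2.plot(data.x, objective.residuals(), ".")
ax2.set_ylabel("residuals")
plt.show()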
Example #29
0
def getObjective(data, nLayers, bs_contrast_layer=None,
                 contrast_layer=None,
                 limits=None, doMCMC=False,
                 logpExtra=None, onlyStructure=False,
                 both=False, globalObjective=False):
    """
    Build an Objective (or a GlobalObjective for two contrasts) for an
    air | layers | substrate slab model, with the layer thicknesses and
    SLDs varying within `limits`.
    """

    if globalObjective:
        if bs_contrast_layer is None:
            bs_contrast_layer = 6
        if contrast_layer is None:
            contrast_layer = 1
#         print("data, nLayers, bs_contrast_layer=None,\n contrast_layer=None,\nlimits = None, doMCMC=False,\nlogpExtra=None, onlyStructure=False,\nboth=False, globalObjective=False: ",
#                      data, nLayers, bs_contrast_layer,
#                      contrast_layer,
#                      limits, doMCMC,
#                      logpExtra, onlyStructure,
#                      both, globalObjective)

    air = SLD(0, name="air layer")
    airSlab = air(10, 0)

    sio2 = SLD(10, name="bottom layer")
    sio2Slab = sio2(10, 0)

    # limits = [maxThick, lowerThick, lowerB, upperB]
    if limits is None:
        limits = [350, 50, 4, 6]

    maxThick = float(limits[0])
    lowerThick = limits[1]
    upperThick = maxThick - nLayers * lowerThick
    lowerB = limits[2]
    upperB = limits[3]

    if globalObjective:
        thick_contrast_layer = Parameter(maxThick / nLayers,
                                         "layer1 thickness")
        rough_contrast_layer = Parameter(0,
                                         "layer0/contrast roughness")
        sldcontrastA = SLD(5, name="contrast A layer")
        sldcontrastASlab = sldcontrastA(thick_contrast_layer,
                                        rough_contrast_layer)
        sldcontrastASlab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sldcontrastASlab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

        sldcontrastB = SLD(5, name="contrast B layer")
        sldcontrastBSlab = sldcontrastB(thick_contrast_layer,
                                        rough_contrast_layer)
        sldcontrastBSlab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sldcontrastBSlab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

    if nLayers >= 1 and not globalObjective:
        sld1 = SLD(5, name="first layer")
        sld1Slab = sld1(maxThick / nLayers, 0)
        sld1Slab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sld1Slab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

    if nLayers >= 2:
        sld2 = SLD(5, name="second layer")
        sld2Slab = sld2(maxThick / nLayers, 0)
        sld2Slab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sld2Slab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

    if nLayers >= 3:
        sld3 = SLD(5, name="third layer")
        sld3Slab = sld3(maxThick / nLayers, 0)
        sld3Slab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sld3Slab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

    if nLayers >= 4:
        sld4 = SLD(5, name="fourth layer")
        sld4Slab = sld4(maxThick / nLayers, 0)
        sld4Slab.thick.setp(vary=True, bounds=(lowerThick, upperThick))
        sld4Slab.sld.real.setp(vary=True, bounds=(lowerB, upperB))

    if globalObjective and contrast_layer == 1:
        if nLayers == 1:
            structure1 = airSlab | sldcontrastASlab | sio2Slab
            structure2 = airSlab | sldcontrastBSlab | sio2Slab
        if nLayers == 2:
            structure1 = airSlab | sldcontrastASlab | sld2Slab | sio2Slab
            structure2 = airSlab | sldcontrastBSlab | sld2Slab | sio2Slab
        if nLayers == 3:
            structure1 = airSlab | sldcontrastASlab | sld2Slab | sld3Slab | sio2Slab
            structure2 = airSlab | sldcontrastBSlab | sld2Slab | sld3Slab | sio2Slab
        if nLayers == 4:
            structure1 = (airSlab | sldcontrastASlab | sld2Slab | sld3Slab
                          | sld4Slab | sio2Slab)
            structure2 = (airSlab | sldcontrastBSlab | sld2Slab | sld3Slab
                          | sld4Slab | sio2Slab)
        if onlyStructure:
            returns = structure1, structure2
        else:
            model1 = ReflectModel(structure1, bkg=3e-6, dq=5.0)
            model1.scale.setp(bounds=(0.85, 1.2), vary=True)
            model1.bkg.setp(bounds=(1e-9, 9e-6), vary=True)
            objective1 = Objective(model1, data[0],
                                   transform=Transform('logY'),
                                   logp_extra=logpExtra)

            model2 = ReflectModel(structure2, bkg=3e-6, dq=5.0)
            model2.scale.setp(bounds=(0.85, 1.2), vary=True)
            model2.bkg.setp(bounds=(1e-9, 9e-6), vary=True)
            objective2 = Objective(model2, data[1],
                                   transform=Transform('logY'),
                                   logp_extra=logpExtra)

            if both:
                returns = (GlobalObjective([objective1, objective2]),
                           structure1, structure2)
                print("GlobalObjective and 2 structures")
            else:
                returns = GlobalObjective([objective1, objective2])

    elif not globalObjective:
        if nLayers == 1:
            structure = airSlab | sld1Slab | sio2Slab
        if nLayers == 2:
            structure = airSlab | sld1Slab | sld2Slab | sio2Slab
        if nLayers == 3:
            structure = airSlab | sld1Slab | sld2Slab | sld3Slab | sio2Slab
        if nLayers == 4:
            structure = (airSlab | sld1Slab | sld2Slab | sld3Slab | sld4Slab
                         | sio2Slab)
        if onlyStructure:
            returns = structure
        elif both:
            model = ReflectModel(structure, bkg=3e-6, dq=5.0)
            objective = Objective(model, data,
                                  transform=Transform('logY'),
                                  logp_extra=logpExtra)
            returns = objective, structure
        else:
            model = ReflectModel(structure, bkg=3e-6, dq=5.0)
            objective = Objective(model, data,
                                  transform=Transform('logY'),
                                  logp_extra=logpExtra)
            returns = objective
    else:
        print("error: contrast_layer must be 1 (the layer adjacent to the "
              "air slab) when globalObjective is True")
        returns = None

    return returns
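A minimal, hypothetical usage sketch of the helper above; `dataA` and `dataB`
stand for ReflectDataset instances loaded elsewhere:

from refnx.analysis import CurveFitter

# single contrast: a two-layer model fitted on a logY scale, returning both
# the objective and the underlying structure
objective, structure = getObjective(dataA, nLayers=2,
                                    limits=[350, 50, 4, 6], both=True)
fitter = CurveFitter(objective)
fitter.fit("differential_evolution")

# two contrasts of the same structure co-refined via a GlobalObjective
global_objective = getObjective([dataA, dataB], nLayers=2,
                                globalObjective=True)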
Example #30
0
    hold_phih=True,
)

models = []
t = len(cont)

for i in range(t):
    models.append(ReflectModel(structures[i]))
    models[i].scale.setp(vary=True, bounds=(0.005, 10))
    models[i].bkg.setp(datasets[i].y[-1], vary=True, bounds=(1e-10, 1e-4))

objectives = []
t = len(cont)
for i in range(t):
    objectives.append(
        Objective(models[i], datasets[i], transform=Transform("YX4")))

global_objective = GlobalObjective(objectives)

chain = refnx.analysis.load_chain("{}_chain.txt".format(anal_dir))

pchain = refnx.analysis.process_chain(global_objective, chain)

para_labels = [
    '_scale_{}_{}'.format(sp, cont[0]), '_bkg_{}_{}'.format(sp, cont[0]),
    '-d_h_{}'.format(sp), '-d_t_{}'.format(sp), '_rough_{}'.format(sp),
    '_scale_{}_{}'.format(sp, cont[1]), '_bkg_{}_{}'.format(sp, cont[1]),
    '_scale_{}_{}'.format(sp, cont[2]), '_bkg_{}_{}'.format(sp, cont[2]),
    '_scale_{}_{}'.format(sp, cont[3]), '_bkg_{}_{}'.format(sp, cont[3]),
    '_scale_{}_{}'.format(sp, cont[4]), '_bkg_{}_{}'.format(sp, cont[4]),
    '_scale_{}_{}'.format(sp, cont[5]), '_bkg_{}_{}'.format(sp, cont[5]),