Example #1
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters`, all the parameters associated with
        this structure.

        """
        p = Parameters(name='Stack - {0}'.format(self.name))
        p.append(self.repeats)
        p.extend([component.parameters for component in self.components])
        return p
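The pattern in Examples #1 and #2 builds a nested tree of parameters: a Parameters container can hold both Parameter leaves and other Parameters groups. A minimal sketch of how such a tree can be assembled and walked, assuming the list-like Parameters API and the flattened() method used in Example #8:

from refnx.analysis import Parameter, Parameters

# a named sub-group nested inside a root group
sub = Parameters(name='sub-component')
sub.append(Parameter(1.0, name='thickness'))

root = Parameters(name='root')
root.append(Parameter(2.0, name='scale'))
root.append(sub)

# flattened() walks the tree and returns only the Parameter leaves
print([p.name for p in root.flattened()])  # ['scale', 'thickness']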
Example #2
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters`, all the parameters associated with
        this structure.

        """
        p = Parameters(name='Structure - {0}'.format(self.name))
        p.extend([component.parameters for component in self.components])
        if self._solvent is not None:
            p.append(self.solvent.parameters)
        return p
Example #3
    def test_repr(self):
        p = Parameter(value=5, vary=False, name="test")
        g = Parameters(name="name")
        f = Parameters()
        f.append(p)
        f.append(g)

        q = eval(repr(f))
        assert q.name is None
        assert_equal(q[0].value, 5)
        assert q[0].vary is False
        assert isinstance(q[1], Parameters)
Example #4
    def test_repr(self):
        p = Parameter(value=5, vary=False, name='test')
        g = Parameters(name='name')
        f = Parameters()
        f.append(p)
        f.append(g)

        q = eval(repr(f))
        assert q.name is None
        assert_equal(q[0].value, 5)
        assert q[0].vary is False
        assert isinstance(q[1], Parameters)
Example #5
File: objective.py, Project: llimeht/refnx
    def parameters(self):
        """
        :class:`refnx.analysis.Parameters` associated with all the objectives.

        """
        # TODO this is probably going to be slow.
        # cache and update strategy?
        p = Parameters(name='global fitting parameters')

        for objective in self.objectives:
            p.append(objective.parameters)

        return p
Example #6
    def parameters(self):
        p = Parameters(name=self.name)
        p.extend([
            self.apm, self.b_heads_real, self.b_heads_imag, self.vm_heads,
            self.thickness_heads, self.b_tails_real, self.b_tails_imag,
            self.vm_tails, self.thickness_tails, self.rough_head_tail,
            self.rough_preceding_mono
        ])
        if self.head_solvent is not None:
            p.append(self.head_solvent.parameters)
        if self.tail_solvent is not None:
            p.append(self.tail_solvent.parameters)

        return p
Example #7
    def __init__(
        self,
        structures,
        scales=None,
        bkg=1e-7,
        name="",
        dq=5.0,
        threads=-1,
        quad_order=17,
        dq_type="pointwise",
        q_offset=0.0,
    ):
        self.name = name
        self._parameters = None
        self.threads = threads
        self.quad_order = quad_order

        # all reflectometry models need scale factors and a background.
        # By default each scale is set to 1 / len(structures).
        pscales = Parameters(name="scale factors")

        if scales is not None and len(structures) == len(scales):
            tscales = scales
        elif scales is not None and len(structures) != len(scales):
            raise ValueError("You need to supply scale factor for each"
                             " structure")
        else:
            tscales = [1 / len(structures)] * len(structures)

        for scale in tscales:
            p_scale = possibly_create_parameter(scale, name="scale")
            pscales.append(p_scale)

        self._scales = pscales
        self._bkg = possibly_create_parameter(bkg, name="bkg")

        # we can optimize the resolution (but this is always overridden by
        # x_err if supplied, so there is possibly no dependence on it).
        self._dq = possibly_create_parameter(dq, name="dq - resolution")
        self.dq_type = dq_type

        self._q_offset = possibly_create_parameter(q_offset,
                                                   name="q_offset",
                                                   units="Å**-1")

        self._structures = structures
Example #8
    def test_or(self):
        # concatenation of Parameter instances
        a = Parameter(1, name='a')
        b = Parameter(2, name='b')
        c = Parameters(name='c')
        c.append(a)
        c.append(b)

        # concatenate Parameter instances
        d = a | b
        assert_(is_parameters(d))

        # concatenate Parameter with Parameters
        d = a | c
        assert_(is_parameters(d))
        assert_equal(len(d), 2)
        # flattened contents are [a, a, b]: d holds a and c, and c
        # already contains a and b
        assert_equal(len(d.flattened()), 3)
Example #9
    def __init__(self,
                 structures,
                 scales=None,
                 bkg=1e-7,
                 name='',
                 dq=5.,
                 threads=-1,
                 quad_order=17):
        self.name = name
        self._parameters = None
        self.threads = threads
        self.quad_order = quad_order

        # all reflectometry models need scale factors and a background.
        # By default each scale is set to 1 / len(structures).
        pscales = Parameters('scale factors')

        if scales is not None and len(structures) == len(scales):
            tscales = scales
        elif scales is not None and len(structures) != len(scales):
            raise ValueError("You need to supply scale factor for each"
                             " structure")
        else:
            tscales = [1 / len(structures)] * len(structures)

        for scale in tscales:
            p_scale = possibly_create_parameter(scale, name='scale')
            pscales.append(p_scale)

        self._scales = pscales
        self._bkg = possibly_create_parameter(bkg, name='bkg')

        # we can optimize the resolution (but this is always overridden by
        # x_err if supplied, so there is possibly no dependence on it).
        self._dq = possibly_create_parameter(dq, name='dq - resolution')

        self._structures = structures
Example #10
class TestFitterGauss(object):
    # Test CurveFitter with a noisy gaussian, weighted and unweighted, to see
    # if the parameters and uncertainties come out correctly

    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.tmpdir = tmpdir.strpath

        theoretical = np.loadtxt(os.path.join(self.path, 'gauss_data.txt'))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        # these best weighted values and uncertainties were obtained with Igor
        self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]

        self.best_weighted_errors = [0.0220313708486, 1.12879436221,
                                     0.0447659158681, 0.0412022938883]

        self.best_weighted_chisqr = 77.6040960351

        self.best_unweighted = [-0.10584111872702096, 19.240347049328989,
                                0.0092623066070940396, 1.501362314145845]

        self.best_unweighted_errors = [0.34246565477, 0.689820935208,
                                       0.0411243173041, 0.0693429375282]

        self.best_unweighted_chisqr = 497.102084956

        self.p0 = np.array([0.1, 20., 0.1, 0.1])
        self.names = ['bkg', 'A', 'x0', 'width']
        self.bounds = [(-1, 1), (0, 30), (-5., 5.), (0.001, 2)]

        self.params = Parameters(name="gauss_params")
        for p, name, bound in zip(self.p0, self.names, self.bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            self.params.append(param)

        self.model = Model(self.params, fitfunc=gauss)
        self.data = Data1D((xvals, yvals, evals))
        self.objective = Objective(self.model, self.data)
        return 0

    def test_best_weighted(self):
        assert_equal(len(self.objective.varying_parameters()), 4)
        self.objective.setp(self.p0)

        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit('least_squares', jac='3-point')

        output = res.x
        assert_almost_equal(output, self.best_weighted, 3)
        assert_almost_equal(self.objective.chisqr(),
                            self.best_weighted_chisqr, 5)

        # compare the residuals
        res = (self.data.y - self.model(self.data.x)) / self.data.y_err
        assert_equal(self.objective.residuals(), res)

        # compare objective.covar to the best_weighted_errors
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.005)

        # we're also going to try the checkpointing here.
        checkpoint = os.path.join(self.tmpdir, 'checkpoint.txt')

        # compare samples to best_weighted_errors
        np.random.seed(1)
        f.sample(steps=101, random_state=1, verbose=False, f=checkpoint)
        process_chain(self.objective, f.chain, nburn=50, nthin=10)
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)

        # test that the checkpoint worked
        check_array = np.loadtxt(checkpoint)
        check_array = check_array.reshape(101, f._nwalkers, f.nvary)
        assert_allclose(check_array, f.chain)

        # test loading the checkpoint
        chain = load_chain(checkpoint)
        assert_allclose(chain, f.chain)

        f.initialise('jitter')
        f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
        assert_equal(f.chain.shape[0], 2)

        # the following test won't work because of emcee/gh226.
        # chain = load_chain(checkpoint)
        # assert_(chain.shape == f.chain.shape)
        # assert_allclose(chain, f.chain)

    def test_best_unweighted(self):
        self.objective.weighted = False
        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit()

        output = res.x
        assert_almost_equal(self.objective.chisqr(),
                            self.best_unweighted_chisqr)
        assert_almost_equal(output, self.best_unweighted, 5)

        # compare the residuals
        res = self.data.y - self.model(self.data.x)
        assert_equal(self.objective.residuals(), res)

        # compare objective._covar to the best_unweighted_errors
        uncertainties = np.array([param.stderr for param in self.params])
        assert_almost_equal(uncertainties, self.best_unweighted_errors, 3)
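The fixture above references a gauss fit function that is not shown in this excerpt. A plausible sketch, assuming refnx's fitfunc(x, p, *args, **kwds) calling convention and the parameter order [bkg, A, x0, width] used for self.p0:

import numpy as np

def gauss(x, p, *args, **kwds):
    # p holds the parameter values: background, amplitude, centre, width
    bkg, amp, x0, width = np.array(p)
    return bkg + amp * np.exp(-0.5 * ((x - x0) / width) ** 2)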
Example #11
class MixedReflectModel(object):
    r"""
    Calculates an incoherent average of reflectivities from a sequence of
    structures. Such a situation may occur if a sample is not uniform over its
    illuminated area.

    Parameters
    ----------
    structures : sequence of refnx.reflect.Structure
        The interfacial structures to incoherently average
    scales : None, sequence of float or refnx.analysis.Parameter, optional
        scale factors. The reflectivities calculated from each of the
        structures are multiplied by their respective scale factor during
        overall summation. These values are turned into Parameters during the
        construction of this object.
        You must supply a scale factor for each of the structures. If `scales`
        is `None`, then default scale factors are used:
        `[1 / len(structures)] * len(structures)`. It is a good idea to set the
        lower bound of each scale factor to zero (not done by default).
    bkg : float or refnx.analysis.Parameter, optional
        linear background added to the overall reflectivity. This is turned
        into a Parameter during the construction of this object.
    name : str, optional
        Name of the mixed Model
    dq : float or refnx.analysis.Parameter, optional

        - if `dq == 0` then no resolution smearing is employed.
        - if `dq` is a float or refnx.analysis.Parameter then a constant
           dQ/Q resolution smearing is employed. For 5% resolution smearing
           supply 5.

        However, if `x_err` is supplied to the `model` method, then that
        overrides any setting given here. This value is turned into
        a Parameter during the construction of this object.
    threads: int, optional
        Specifies the number of threads for parallel calculation. This
        option is only applicable if you are using the ``_creflect``
        module. The option is ignored if using the pure python calculator,
        ``_reflect``. If `threads == 0` then all available processors are
        used.
    quad_order: int, optional
        the order of the Gaussian quadrature polynomial for doing the
        resolution smearing. default = 17. Don't choose less than 13. If
        quad_order == 'ultimate' then adaptive quadrature is used. Adaptive
        quadrature will always work, but takes a _long_ time (2 or 3 orders
        of magnitude longer). Fixed quadrature will always take a lot less
        time. BUT it won't necessarily work across all samples. For
        example, 13 points may be fine for a thin layer, but will be
        atrocious at describing a multilayer with Bragg peaks.

    """
    def __init__(self,
                 structures,
                 scales=None,
                 bkg=1e-7,
                 name='',
                 dq=5.,
                 threads=0,
                 quad_order=17):
        self.name = name
        self._parameters = None
        self.threads = threads
        self.quad_order = quad_order

        # all reflectometry models need scale factors and a background.
        # By default each scale is set to 1 / len(structures).
        pscales = Parameters('scale factors')

        if scales is not None and len(structures) == len(scales):
            tscales = scales
        elif scales is not None and len(structures) != len(scales):
            raise ValueError("You need to supply scale factor for each"
                             " structure")
        else:
            tscales = [1 / len(structures)] * len(structures)

        for scale in tscales:
            p_scale = possibly_create_parameter(scale, name='scale')
            pscales.append(p_scale)

        self._scales = pscales
        self._bkg = possibly_create_parameter(bkg, name='bkg')

        # we can optimize the resolution (but this is always overridden by
        # x_err if supplied, so there is possibly no dependence on it).
        self._dq = possibly_create_parameter(dq, name='dq - resolution')

        self._structures = structures

    def __call__(self, x, p=None, x_err=None):
        return self.model(x, p=p, x_err=x_err)

    @property
    def dq(self):
        r"""
        :class:`refnx.analysis.Parameter`

            - `dq.value == 0`
               no resolution smearing is employed.
            - `dq.value > 0`
               a constant dQ/Q resolution smearing is employed.  For 5%
               resolution smearing supply 5. However, if `x_err` is supplied to
               the `model` method, then that overrides any setting reported
               here.

        """
        return self._dq

    @dq.setter
    def dq(self, value):
        self._dq.value = value

    @property
    def scales(self):
        r"""
        :class:`refnx.analysis.Parameters` - the reflectivity calculated from
        each structure is multiplied by its respective scale factor during
        summation.

        """
        return self._scales

    @property
    def bkg(self):
        r"""
        :class:`refnx.analysis.Parameter` - linear background added to all
        model values.

        """
        return self._bkg

    @bkg.setter
    def bkg(self, value):
        self._bkg.value = value

    def model(self, x, p=None, x_err=None):
        r"""
        Calculate the reflectivity of this model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray

        """
        if p is not None:
            self.parameters.pvals = np.array(p)
        if x_err is None:
            # fallback to what this object was constructed with
            x_err = float(self.dq)

        scales = np.array(self.scales)

        y = np.zeros_like(x)

        for scale, structure in zip(scales, self.structures):
            y += reflectivity(x,
                              structure.slabs[..., :4],
                              scale=scale,
                              dq=x_err,
                              threads=self.threads,
                              quad_order=self.quad_order)

        return y + self.bkg.value

    def lnprob(self):
        r"""
        Additional log-probability terms for the reflectivity model. Do not
        include log-probability terms for model parameters, these are
        automatically calculated elsewhere.

        Returns
        -------
        lnprob : float
            log-probability of structure.

        """
        lnprob = 0
        for structure in self._structures:
            lnprob += structure.lnprob()

        return lnprob

    @property
    def structures(self):
        r"""
        list of :class:`refnx.reflect.Structure` objects describing the
        interfacial structures of a reflectometry sample.

        """
        return self._structures

    @property
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters` - parameters associated with this
        model.

        """
        p = Parameters(name='instrument parameters')
        p.extend([self.scales, self.bkg, self.dq])

        self._parameters = Parameters(name=self.name)
        self._parameters.append([p])
        self._parameters.extend(
            [structure.parameters for structure in self._structures])
        return self._parameters
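A minimal usage sketch for MixedReflectModel, assuming two refnx.reflect.Structure objects s1 and s2 have already been built (the range call follows the docstring's advice to give each scale factor a zero lower bound):

import numpy as np

q = np.linspace(0.01, 0.3, 200)

# each structure receives a default scale of 1 / len(structures)
model = MixedReflectModel([s1, s2], bkg=2e-7, dq=5.0)

# make the patch fractions fittable, bounded at zero as recommended
for scale in model.scales:
    scale.range(0, 1)
    scale.vary = True

averaged_reflectivity = model(q)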
Example #12
class FreeformVFP(Component):
    def __init__(self, adsorbed_amount, vff, dzf, polymer_sld, name='',
                 left_slabs=(), right_slabs=(),
                 interpolator=Pchip, zgrad=True,
                 microslab_max_thickness=1, profile_cutoff=5000):
        """
        Parameters
        ----------
        adsorbed_amount : Parameter or float
            The total area under the volume fraction profile (the adsorbed
            amount of polymer)
        vff : sequence of Parameter or float
            Volume fraction at each of the spline knots, as a fraction of
            the volume fraction of the rightmost left slab
        dzf : sequence of Parameter or float
            Separation of successive knots; normalised to a 0-1 scale.
        polymer_sld : SLD or float
            SLD of polymer
        name : str
            Name of component
        left_slabs : sequence of Slab
            Polymer Slabs to the left of the spline
        right_slabs : sequence of Slab
            Polymer Slabs to the right of the spline
        interpolator : scipy interpolator
            The interpolator for the spline
        zgrad : bool, optional
            Set to `True` to force the gradient of the volume fraction to zero
            at each end of the spline.
        microslab_max_thickness : float
            Thickness of microslicing of spline for reflectivity calculation.
        profile_cutoff : float
            maximum extent (thickness) of the freeform profile. The profile is
            'cut off' (VF=0) after this point.
        """
        super(FreeformVFP, self).__init__()

        assert len(vff) + 1 == len(dzf), ("Length of dzf must be one greater"
                                          " than length of vff")

        self.name = name

        if isinstance(polymer_sld, SLD):
            self.polymer_sld = polymer_sld
        else:
            self.polymer_sld = SLD(polymer_sld)

        # left and right slabs are other areas where the same polymer can
        # reside
        self.left_slabs = [slab for slab in left_slabs if
                           isinstance(slab, Slab)]
        self.right_slabs = [slab for slab in right_slabs if
                            isinstance(slab, Slab)]

        # use the volume fraction of the last left_slab as the initial vf of
        # the spline; if no left slabs are supplied, start at vf = 1
        if len(self.left_slabs):
            self.start_vf = 1 - self.left_slabs[-1].vfsolv.value
        else:
            self.start_vf = 1

        # in contrast, use vf = 0 for the last knot of the spline, unless
        # right_slabs is specified
        if len(self.right_slabs):
            self.end_vf = 1 - self.right_slabs[0].vfsolv.value
        else:
            self.end_vf = 0

        self.microslab_max_thickness = microslab_max_thickness

        self.adsorbed_amount = (
            possibly_create_parameter(adsorbed_amount,
                                      name='%s - adsorbed amount' % name))

        # dzf are the spatial gaps between the spline knots
        self.dzf = Parameters(name='dzf - spline')
        for i, z in enumerate(dzf):
            p = possibly_create_parameter(
                z,
                name='%s - spline dzf[%d]' % (name, i))
            p.range(0, 1)
            self.dzf.append(p)

        # vf are the volume fraction values of each of the spline knots
        self.vff = Parameters(name='vff - spline')
        for i, v in enumerate(vff):
            p = possibly_create_parameter(
                v,
                name='%s - spline vff[%d]' % (name, i))
            p.range(0, 1)
            self.vff.append(p)

        self.cutoff = profile_cutoff

        self.zgrad = zgrad
        self.interpolator = interpolator

        self.__cached_interpolator = {'zeds': np.array([]),
                                      'vf': np.array([]),
                                      'interp': None,
                                      'adsorbed amount': -1}

    def _update_vfs(self):
        # use the volume fraction of the last left_slab as the initial vf of
        # the spline; if no left slabs are supplied, start at vf = 1
        if len(self.left_slabs):
            self.start_vf = 1 - self.left_slabs[-1].vfsolv.value
        else:
            self.start_vf = 1

        # in contrast, use vf = 0 for the last knot of the spline, unless
        # right_slabs is specified
        if len(self.right_slabs):
            self.end_vf = 1 - self.right_slabs[0].vfsolv.value
        else:
            self.end_vf = 0

    def _vff_to_vf(self):
        self._update_vfs()
        vf = np.cumprod(self.vff) * (self.start_vf - self.end_vf) + self.end_vf
        vf = np.clip(vf, 0, 1)
        return vf

    def _dzf_to_zeds(self):
        zeds = np.cumsum(self.dzf)
        # Normalise dzf to unit interval.
        # clipped to 0 and 1 because we pad on the LHS, RHS later
        # and we need the array to be monotonically increasing
        zeds /= zeds[-1]
        zeds = np.clip(zeds, 0, 1)
        zeds = zeds[0:-1]
        return zeds

    def _extent(self):
        # First calculate slab area:
        slab_area = self._slab_area()
        difference = float(self.adsorbed_amount) - slab_area

        assert difference > 0, ("Your slab area has exceeded your adsorbed"
                                " amount!")

        interpolator = self._vfp_interpolator()
        extent = difference / interpolator.integrate(0, 1)

        return extent

    def _slab_area(self):
        area = 0

        for slab in self.left_slabs:
            _slabs = slab.slabs()
            area += _slabs[0, 0] * (1 - _slabs[0, 4])
        for slab in self.right_slabs:
            _slabs = slab.slabs()
            area += _slabs[0, 0] * (1 - _slabs[0, 4])
        return area

    def _vfp_interpolator(self):
        """
        The spline based volume fraction profile interpolator

        Returns
        -------
        interpolator : scipy.interpolate.Interpolator
        """

        zeds = self._dzf_to_zeds()
        vf = self._vff_to_vf()

        # do you require zero gradient at either end of the spline?
        if self.zgrad:
            zeds = np.concatenate([[-1.1, 0 - EPS],
                                   zeds,
                                   [1 + EPS, 2.1]])
            vf = np.concatenate([[self.start_vf, self.start_vf],
                                 vf,
                                 [self.end_vf, self.end_vf]])
        else:
            zeds = np.concatenate([[0 - EPS], zeds, [1 + EPS]])
            vf = np.concatenate([[self.start_vf], vf, [self.end_vf]])

        # cache the interpolator
        cache_zeds = self.__cached_interpolator['zeds']
        cache_vf = self.__cached_interpolator['vf']
        cache_adsamt = self.__cached_interpolator['adsorbed amount']

        # you don't need to recreate the interpolator
        if (np.equal(float(self.adsorbed_amount), cache_adsamt) and
            np.array_equal(zeds, cache_zeds) and
                np.array_equal(vf, cache_vf)):
            return self.__cached_interpolator['interp']
        else:
            self.__cached_interpolator['zeds'] = zeds
            self.__cached_interpolator['vf'] = vf
            self.__cached_interpolator['adsorbed amount'] = (
                float(self.adsorbed_amount))

        interpolator = self.interpolator(zeds, vf)
        self.__cached_interpolator['interp'] = interpolator
        return interpolator

    def __call__(self, z):
        """
        Calculates the volume fraction profile of the spline

        Parameters
        ----------
        z : float
            Distance along vfp

        Returns
        -------
        vfp : float
            Volume fraction
        """
        interpolator = self._vfp_interpolator()
        vfp = interpolator(z / float(self._extent()))
        return vfp

    def moment(self, moment=1):
        """
        Calculates the n'th moment of the profile

        Parameters
        ----------
        moment : int
            order of moment to be calculated

        Returns
        -------
        moment : float
            n'th moment
        """
        zed, profile = self.profile()
        profile *= zed**moment
        val = simps(profile, zed)
        area = self.profile_area()
        return val / area

    def is_monotonic(self):
        # the vf knots come from cumprod(vff), so the profile decreases
        # monotonically only when every vff ratio is below 1
        return np.all(self.vff.pvals < 1)

    @property
    def parameters(self):
        p = Parameters(name=self.name)
        p.extend([self.adsorbed_amount, self.dzf, self.vff,
                  self.polymer_sld.parameters])
        p.extend([slab.parameters for slab in self.left_slabs])
        p.extend([slab.parameters for slab in self.right_slabs])
        return p

    def lnprob(self):
        return 0

    def profile_area(self):
        """
        Calculates integrated area of volume fraction profile

        Returns
        -------
        area: integrated area of volume fraction profile
        """
        interpolator = self._vfp_interpolator()
        area = interpolator.integrate(0, 1) * float(self._extent())

        area += self._slab_area()

        return area

    def slabs(self, structure=None):

        slab_extent = self._extent()
        
        try:
            cutoff = self.cutoff
        except AttributeError:
            cutoff = 5000
            warnings.warn('FreeformVFP out of date')

        if slab_extent > cutoff:
            warnings.warn('extent > %d, performing refl. calc on first %dA.' %
                          (cutoff, cutoff), RuntimeWarning)

            slab_extent = cutoff

        num_slabs = np.ceil(float(slab_extent) / self.microslab_max_thickness)
        slab_thick = float(slab_extent / num_slabs)
        slabs = np.zeros((int(num_slabs), 5))
        slabs[:, 0] = slab_thick

        # give last slab a minuscule roughness so it doesn't get contracted
        slabs[-1:, 3] = 0.5

        dist = np.cumsum(slabs[..., 0]) - 0.5 * slab_thick
        slabs[:, 1] = self.polymer_sld.real.value
        slabs[:, 2] = self.polymer_sld.imag.value
        slabs[:, 4] = 1 - self(dist)

        return slabs

    def profile(self, extra=False):
        """
        Calculates the volume fraction profile

        Returns
        -------
        z, vfp : np.ndarray
            Distance from the interface, volume fraction profile
        """
        s = Structure()
        s |= SLD(0)

        m = SLD(1.)

        for i, slab in enumerate(self.left_slabs):
            layer = m(slab.thick.value, slab.rough.value)
            if not i:
                layer.rough.value = 0
            layer.vfsolv.value = slab.vfsolv.value
            s |= layer

        polymer_slabs = self.slabs()
        offset = np.sum(s.slabs()[:, 0])

        for i in range(np.size(polymer_slabs, 0)):
            layer = m(polymer_slabs[i, 0], polymer_slabs[i, 3])
            layer.vfsolv.value = polymer_slabs[i, -1]
            s |= layer

        for i, slab in enumerate(self.right_slabs):
            layer = m(slab.thick.value, slab.rough.value)
            layer.vfsolv.value = 1 - slab.vfsolv.value
            s |= layer

        s |= SLD(0, 0)

        # now calculate the VFP.
        total_thickness = np.sum(s.slabs()[:, 0])
        if total_thickness < 500:
            num_zed_points = int(total_thickness)
        else:
            num_zed_points = 500
        zed = np.linspace(0, total_thickness, num_zed_points)
        # SLD profile puts a very small roughness on the interfaces with zero
        # roughness.
        zed[0] = 0.01
        z, s = s.sld_profile(z=zed)
        s[0] = s[1]

        # perhaps you'd like to plot the knot locations
        zeds = self._dzf_to_zeds()
        zed_knots = zeds * float(self._extent()) + offset

        if extra:
            return z, s, zed_knots, self._vff_to_vf()
        else:
            return z, s
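The knot parametrisation in FreeformVFP is relative: _vff_to_vf turns each vff ratio into an absolute volume fraction via np.cumprod, so every knot is a fraction of the one before it. A worked sketch of that arithmetic, assuming start_vf = 1 and end_vf = 0:

import numpy as np

vff = np.array([0.9, 0.5, 0.8])   # knot ratios, each bounded in [0, 1]
start_vf, end_vf = 1.0, 0.0

# cumprod makes each knot no larger than its predecessor, which is why
# the profile is monotonically decreasing whenever all vff < 1
vf = np.cumprod(vff) * (start_vf - end_vf) + end_vf
print(vf)  # [0.9  0.45 0.36]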
Example #13
class FreeformVFPextent(Component):
    """
    Freeform volume fraction profiles for a polymer brush. The extent of the
    brush is used as a fitting parameter.

    Parameters
    ----------
    extent : Parameter or float
        The total extent of the spline region
    vf: sequence of Parameter or float
        Absolute volume fraction at each of the spline knots
    dz : sequence of Parameter or float
        Separation of successive knots, expressed as a fraction of
        `extent`.
    polymer_sld : SLD or float
        SLD of polymer
    name : str
        Name of component
    gamma : Parameter
        The dry adsorbed amount of polymer
    left_slabs : sequence of Slab
        Slabs to the left of the spline
    right_slabs : sequence of Slab
        Slabs to the right of the spline
    interpolator : scipy interpolator
        The interpolator for the spline
    zgrad : bool, optional
        Set to `True` to force the gradient of the volume fraction to zero
        at each end of the spline.
    monotonic_penalty : number, optional
        The penalty added to the log-probability to penalise non-monotonic
        spline knots.
        Set to a very large number (e.g. 1e250) to enforce a monotonically
        decreasing volume fraction spline.
        Set to a very negative number (e.g. -1e250) to enforce a
        monotonically increasing volume fraction spline.
        Set to zero (default) to apply no penalty.
        Note - the absolute value of `monotonic_penalty` is subtracted from
        the overall log-probability, the sign is only used to determine the
        direction that is requested.
    microslab_max_thickness : float
        Thickness of microslicing of spline for reflectivity calculation.

    """
    def __init__(self,
                 extent,
                 vf,
                 dz,
                 polymer_sld,
                 name='',
                 gamma=None,
                 left_slabs=(),
                 right_slabs=(),
                 interpolator=Pchip,
                 zgrad=True,
                 monotonic_penalty=0,
                 microslab_max_thickness=1):

        self.name = name

        if isinstance(polymer_sld, SLD):
            self.polymer_sld = polymer_sld
        else:
            self.polymer_sld = SLD(polymer_sld)

        # left and right slabs are other areas where the same polymer can
        # reside
        self.left_slabs = [
            slab for slab in left_slabs if isinstance(slab, Slab)
        ]
        self.right_slabs = [
            slab for slab in right_slabs if isinstance(slab, Slab)
        ]

        self.microslab_max_thickness = microslab_max_thickness

        self.extent = (possibly_create_parameter(extent,
                                                 name='%s - spline extent' %
                                                 name))

        # dz are the spatial spacings of the spline knots
        self.dz = Parameters(name='dz - spline')
        for i, z in enumerate(dz):
            p = possibly_create_parameter(z,
                                          name='%s - spline dz[%d]' %
                                          (name, i))
            p.range(0, 1)
            self.dz.append(p)

        # vf are the volume fraction values of each of the spline knots
        self.vf = Parameters(name='vf - spline')
        for i, v in enumerate(vf):
            p = possibly_create_parameter(v,
                                          name='%s - spline vf[%d]' %
                                          (name, i))
            p.range(0, 1)
            self.vf.append(p)

        if len(self.vf) != len(self.dz):
            raise ValueError("dz and vf must have the same number of entries")

        self.monotonic_penalty = monotonic_penalty
        self.zgrad = zgrad
        self.interpolator = interpolator

        if gamma is not None:
            self.gamma = possibly_create_parameter(gamma, 'gamma')
        else:
            self.gamma = Parameter(0, 'gamma')

        self.__cached_interpolator = {
            'zeds': np.array([]),
            'vf': np.array([]),
            'interp': None,
            'extent': -1
        }

    def _vfp_interpolator(self):
        """
        The spline based volume fraction profile interpolator

        Returns
        -------
        interpolator : scipy.interpolate.Interpolator
        """
        dz = np.array(self.dz)
        zeds = np.cumsum(dz)

        # if dz's sum to more than 1, then normalise to unit interval.
        # clipped to 0 and 1 because we pad on the LHS, RHS later
        # and we need the array to be monotonically increasing
        if zeds[-1] > 1:
            zeds /= zeds[-1]
            zeds = np.clip(zeds, 0, 1)

        vf = np.array(self.vf)

        # use the volume fraction of the last left_slab as the initial vf of
        # the spline
        if len(self.left_slabs):
            left_end = 1 - self.left_slabs[-1].vfsolv.value
        else:
            left_end = vf[0]

        # in contrast, use vf = 0 for the last knot of the spline, unless
        # right_slabs is specified
        if len(self.right_slabs):
            right_end = 1 - self.right_slabs[0].vfsolv.value
        else:
            right_end = 0

        # do you require zero gradient at either end of the spline?
        if self.zgrad:
            zeds = np.concatenate([[-1.1, 0 - EPS], zeds, [1 + EPS, 2.1]])
            vf = np.concatenate([[left_end, left_end], vf,
                                 [right_end, right_end]])
        else:
            zeds = np.concatenate([[0 - EPS], zeds, [1 + EPS]])
            vf = np.concatenate([[left_end], vf, [right_end]])

        # cache the interpolator
        cache_zeds = self.__cached_interpolator['zeds']
        cache_vf = self.__cached_interpolator['vf']
        cache_extent = self.__cached_interpolator['extent']

        # you don't need to recreate the interpolator
        if (np.array_equal(zeds, cache_zeds) and np.array_equal(vf, cache_vf)
                and np.equal(self.extent, cache_extent)):
            return self.__cached_interpolator['interp']
        else:
            self.__cached_interpolator['zeds'] = zeds
            self.__cached_interpolator['vf'] = vf
            self.__cached_interpolator['extent'] = float(self.extent)

        # TODO make vfp zero for z > self.extent
        interpolator = self.interpolator(zeds, vf)
        self.__cached_interpolator['interp'] = interpolator
        return interpolator

    def __call__(self, z):
        """
        Calculates the volume fraction profile of the spline

        Parameters
        ----------
        z : float
            Distance along vfp

        Returns
        -------
        vfp : float
            Volume fraction
        """
        interpolator = self._vfp_interpolator()
        vfp = interpolator(z / float(self.extent))
        return vfp

    def moment(self, moment=1):
        """
        Calculates the n'th moment of the profile

        Parameters
        ----------
        moment : int
            order of moment to be calculated

        Returns
        -------
        moment : float
            n'th moment
        """
        zed, profile = self.profile()
        profile *= zed**moment
        val = simps(profile, zed)
        area = self.profile_area()
        return val / area

    @property
    def parameters(self):
        p = Parameters(name=self.name)
        p.extend([
            self.extent, self.dz, self.vf, self.polymer_sld.parameters,
            self.gamma
        ])
        p.extend([slab.parameters for slab in self.left_slabs])
        p.extend([slab.parameters for slab in self.right_slabs])
        return p

    def logp(self):
        logp = 0
        # you're trying to enforce monotonicity
        if self.monotonic_penalty:
            monotonic, direction = _is_monotonic(self.vf)

            # if the left slab has a lower vf than the first spline knot
            # then the profile is not monotonic (guard against there being
            # no left slabs)
            if (self.left_slabs and
                    self.vf[0] > (1 - self.left_slabs[-1].vfsolv)):
                monotonic = False

            if not monotonic:
                # you're not monotonic so you have to have the penalty
                # anyway
                logp -= np.abs(self.monotonic_penalty)
            else:
                # you are monotonic, but might be in the wrong direction
                if self.monotonic_penalty > 0 and direction > 0:
                    # positive penalty means you want decreasing
                    logp -= np.abs(self.monotonic_penalty)
                elif self.monotonic_penalty < 0 and direction < 0:
                    # negative penalty means you want increasing
                    logp -= np.abs(self.monotonic_penalty)

        # log-probability for area under profile
        logp += self.gamma.logp(self.profile_area())
        return logp

    def profile_area(self):
        """
        Calculates integrated area of volume fraction profile

        Returns
        -------
        area: integrated area of volume fraction profile
        """
        interpolator = self._vfp_interpolator()
        area = interpolator.integrate(0, 1) * float(self.extent)

        for slab in self.left_slabs:
            _slabs = slab.slabs()
            area += _slabs[0, 0] * (1 - _slabs[0, 4])
        for slab in self.right_slabs:
            _slabs = slab.slabs()
            area += _slabs[0, 0] * (1 - _slabs[0, 4])

        return area

    def slabs(self, structure=None):
        num_slabs = np.ceil(float(self.extent) / self.microslab_max_thickness)
        slab_thick = float(self.extent / num_slabs)
        slabs = np.zeros((int(num_slabs), 5))
        slabs[:, 0] = slab_thick

        # give last slab a minuscule roughness so it doesn't get contracted
        slabs[-1:, 3] = 0.5

        dist = np.cumsum(slabs[..., 0]) - 0.5 * slab_thick
        slabs[:, 1] = self.polymer_sld.real.value
        slabs[:, 2] = self.polymer_sld.imag.value
        slabs[:, 4] = 1 - self(dist)

        return slabs

    def profile(self, extra=False):
        """
        Calculates the volume fraction profile

        Returns
        -------
        z, vfp : np.ndarray
            Distance from the interface, volume fraction profile
        """
        s = Structure()
        s |= SLD(0)

        m = SLD(1.)

        for i, slab in enumerate(self.left_slabs):
            layer = m(slab.thick.value, slab.rough.value)
            if not i:
                layer.rough.value = 0
            layer.vfsolv.value = slab.vfsolv.value
            s |= layer

        polymer_slabs = self.slabs()
        offset = np.sum(s.slabs()[:, 0])

        for i in range(np.size(polymer_slabs, 0)):
            layer = m(polymer_slabs[i, 0], polymer_slabs[i, 3])
            layer.vfsolv.value = polymer_slabs[i, -1]
            s |= layer

        for i, slab in enumerate(self.right_slabs):
            layer = m(slab.thick.value, slab.rough.value)
            layer.vfsolv.value = 1 - slab.vfsolv.value
            s |= layer

        s |= SLD(0, 0)

        # now calculate the VFP.
        total_thickness = np.sum(s.slabs()[:, 0])
        # np.linspace requires an integer number of samples
        zed = np.linspace(0, total_thickness, int(total_thickness) + 1)
        # SLD profile puts a very small roughness on the interfaces with zero
        # roughness.
        zed[0] = 0.01
        z, s = s.sld_profile(z=zed)
        s[0] = s[1]

        # perhaps you'd like to plot the knot locations
        zeds = np.cumsum(self.dz)
        if np.sum(self.dz) > 1:
            zeds /= np.sum(self.dz)
            zeds = np.clip(zeds, 0, 1)

        zed_knots = zeds * float(self.extent) + offset

        if extra:
            return z, s, zed_knots, np.array(self.vf)
        else:
            return z, s
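Example #13 calls a module-level helper _is_monotonic that is not part of the excerpt. A plausible sketch, assuming it returns a (monotonic, direction) pair in which direction is positive for increasing knot values and negative for decreasing ones, matching how logp interprets it:

import numpy as np

def _is_monotonic(vf):
    # sign of successive differences between the spline knots
    diffs = np.diff(np.array(vf))
    increasing = np.all(diffs >= 0)
    decreasing = np.all(diffs <= 0)
    if increasing and not decreasing:
        direction = 1
    elif decreasing and not increasing:
        direction = -1
    else:
        direction = 0   # flat or non-monotonic
    return (increasing or decreasing), direction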
Example #14
File: spline.py, Project: llimeht/refnx
class Spline(Component):
    """
    Freeform modelling of the real part of an SLD profile using spline
    interpolation.

    Parameters
    ----------
    extent : float or Parameter
        Total extent of spline region
    vs : Sequence of float/Parameter
        the real part of the SLD values of each of the knots.
    dz : Sequence of float/Parameter
        the lateral offset between successive knots.
    left : refnx.reflect.Component
        The Component to the left of this Spline region.
    right : refnx.reflect.Component
        The Component to the right of this Spline region.
    solvent : refnx.reflect.SLD
        An SLD instance representing the solvent
    name : str
        Name of component
    interpolator : scipy.interpolate Univariate Interpolator, optional
        Which scipy.interpolate Univariate Interpolator to use.
    zgrad : bool, optional
        If true then extra control knots are placed outside this spline
        with the same SLD as the materials on the left and right. With a
        monotonic interpolator this guarantees that the gradient is zero
        at either end of the interval.
    microslab_max_thickness : float
        Maximum size of the microslabs approximating the spline.

    Notes
    -----
    This spline component only generates the real part of the SLD (thereby
    assuming that the imaginary part is negligible).
    The sequence dz are the lateral offsets of the knots normalised to a
    unit interval [0, 1]. The reason for using lateral offsets is
    so that the knots are monotonically increasing in location. When each
    dz offset is turned into a Parameter it is given bounds in [0, 1].
    Thus with an extent of 500, and dz = [0.1, 0.2, 0.2], the knots will be
    at [0, 50, 150, 250, 500]. Notice that there are two extra knots for
    the start and end of the interval (disregarding the `zgrad` control
    knots). If ``np.sum(dz) > 1``, then the knot spacings are normalised to
    1. e.g. dz of [0.1, 0.2, 0.9] would result in knots (in the normalised
    interval) of [0, 0.0833, 0.25, 1, 1].
    If `vs` is monotonic then the output spline will be monotonic. If `vs`
    is not monotonic then there may be regions of the spline larger or
    smaller than `left` or `right`.
    The slab representation of this component are approximated using a
    'microslab' representation of spline. The max thickness of each
    microslab is `microslab_max_thickness`.
    """
    def __init__(self,
                 extent,
                 vs,
                 dz,
                 left,
                 right,
                 solvent,
                 name='',
                 interpolator=Pchip,
                 zgrad=True,
                 microslab_max_thickness=1):
        self.name = name
        self.left_slab = left
        self.right_slab = right
        self.solvent = solvent
        self.microslab_max_thickness = microslab_max_thickness

        self.extent = (possibly_create_parameter(extent,
                                                 name='%s - spline extent' %
                                                 name))

        self.dz = Parameters(name='dz - spline')
        for i, z in enumerate(dz):
            p = possibly_create_parameter(z,
                                          name='%s - spline dz[%d]' %
                                          (name, i))
            p.range(0, 1)
            self.dz.append(p)

        self.vs = Parameters(name='vs - spline')
        for i, v in enumerate(vs):
            p = possibly_create_parameter(v,
                                          name='%s - spline vs[%d]' %
                                          (name, i))
            self.vs.append(p)

        if len(self.vs) != len(self.dz):
            raise ValueError("dz and vs must have the same number of entries")

        self.zgrad = zgrad
        self.interpolator = interpolator

        self.__cached_interpolator = {
            'zeds': np.array([]),
            'vs': np.array([]),
            'interp': None,
            'extent': -1
        }

    def _interpolator(self):
        dz = np.array(self.dz)
        zeds = np.cumsum(dz)

        # if dz's sum to more than 1, then normalise to unit interval.
        if zeds[-1] > 1:
            zeds /= zeds[-1]
            zeds = np.clip(zeds, 0, 1)

        vs = np.array(self.vs)

        left_sld = Structure.overall_sld(
            np.atleast_2d(self.left_slab.slabs[-1]), self.solvent)[..., 1]

        right_sld = Structure.overall_sld(
            np.atleast_2d(self.right_slab.slabs[0]), self.solvent)[..., 1]

        if self.zgrad:
            zeds = np.concatenate([[-1.1, 0 - EPS], zeds, [1 + EPS, 2.1]])
            vs = np.concatenate([left_sld, left_sld, vs, right_sld, right_sld])
        else:
            zeds = np.concatenate([[0 - EPS], zeds, [1 + EPS]])
            vs = np.concatenate([left_sld, vs, right_sld])

        # cache the interpolator
        cache_zeds = self.__cached_interpolator['zeds']
        cache_vs = self.__cached_interpolator['vs']
        cache_extent = self.__cached_interpolator['extent']

        # you don't need to recreate the interpolator
        if (np.array_equal(zeds, cache_zeds) and np.array_equal(vs, cache_vs)
                and np.equal(self.extent, cache_extent)):
            return self.__cached_interpolator['interp']
        else:
            self.__cached_interpolator['zeds'] = zeds
            self.__cached_interpolator['vs'] = vs
            self.__cached_interpolator['extent'] = float(self.extent)

        # TODO make vfp zero for z > self.extent
        interpolator = self.interpolator(zeds, vs)
        self.__cached_interpolator['interp'] = interpolator
        return interpolator

    def __call__(self, z):
        """
        Calculates the spline value at z

        Parameters
        ----------
        z : float
            Distance along spline

        Returns
        -------
        sld : float
            Real part of SLD
        """
        interpolator = self._interpolator()
        vs = interpolator(z / float(self.extent))
        return vs

    @property
    def parameters(self):
        p = Parameters(name=self.name)
        p.extend([
            self.extent, self.dz, self.vs, self.left_slab.parameters,
            self.right_slab.parameters, self.solvent.parameters
        ])
        return p

    def logp(self):
        return 0

    @property
    def slabs(self):
        num_slabs = np.ceil(float(self.extent) / self.microslab_max_thickness)
        slab_thick = float(self.extent / num_slabs)
        slabs = np.zeros((int(num_slabs), 5))
        slabs[:, 0] = slab_thick

        # give last slab a minuscule roughness so it doesn't get contracted
        slabs[-1:, 3] = 0.5

        dist = np.cumsum(slabs[..., 0]) - 0.5 * slab_thick
        slabs[:, 1] = self(dist)

        return slabs
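The Notes section of the Spline docstring describes how the dz offsets map to knot positions; a short sketch reproducing that arithmetic (the numbers match the docstring's extent = 500, dz = [0.1, 0.2, 0.2] example):

import numpy as np

extent = 500.0
dz = np.array([0.1, 0.2, 0.2])

zeds = np.cumsum(dz)              # [0.1, 0.3, 0.5]
if zeds[-1] > 1:                  # normalise only if the offsets overrun 1
    zeds = np.clip(zeds / zeds[-1], 0, 1)

# fixed knots bracket the interval at 0 and 1 (zgrad control knots aside)
knots = np.concatenate([[0.0], zeds, [1.0]]) * extent
print(knots)  # [  0.  50. 150. 250. 500.]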
Example #15
class MixedReflectModel(object):
    r"""
    Calculates an incoherent average of reflectivities from a sequence of
    structures. Such a situation may occur if a sample is not uniform over its
    illuminated area.

    Parameters
    ----------
    structures : sequence of refnx.reflect.Structure
        The interfacial structures to incoherently average
    scales : None, sequence of float or refnx.analysis.Parameter, optional
        scale factors. The reflectivities calculated from each of the
        structures are multiplied by their respective scale factor during
        overall summation. These values are turned into Parameters during the
        construction of this object.
        You must supply a scale factor for each of the structures. If `scales`
        is `None`, then default scale factors are used:
        `[1 / len(structures)] * len(structures)`. It is a good idea to set the
        lower bound of each scale factor to zero (not done by default).
    bkg : float or refnx.analysis.Parameter, optional
        linear background added to the overall reflectivity. This is turned
        into a Parameter during the construction of this object.
    name : str, optional
        Name of the mixed Model
    dq : float or refnx.analysis.Parameter, optional

        - if `dq == 0` then no resolution smearing is employed.
        - if `dq` is a float or refnx.analysis.Parameter then a constant
           dQ/Q resolution smearing is employed. For 5% resolution smearing
           supply 5.

        However, if `x_err` is supplied to the `model` method, then that
        overrides any setting given here. This value is turned into
        a Parameter during the construction of this object.
    threads: int, optional
        Specifies the number of threads for parallel calculation. This
        option is only applicable if you are using the ``_creflect``
        module. The option is ignored if using the pure python calculator,
        ``_reflect``. If `threads == -1` then all available processors are
        used.
    quad_order: int, optional
        the order of the Gaussian quadrature polynomial for doing the
        resolution smearing. default = 17. Don't choose less than 13. If
        quad_order == 'ultimate' then adaptive quadrature is used. Adaptive
        quadrature will always work, but takes a _long_ time (2 or 3 orders
        of magnitude longer). Fixed quadrature will always take a lot less
        time. BUT it won't necessarily work across all samples. For
        example, 13 points may be fine for a thin layer, but will be
        atrocious at describing a multilayer with Bragg peaks.
    dq_type: {'pointwise', 'constant'}, optional
        Chooses whether pointwise or constant dQ/Q resolution smearing (see
        `dq` keyword) is used. To use pointwise smearing the `x_err` keyword
        provided to `Objective.model` method must be an array, otherwise the
        smearing falls back to 'constant'.
    q_offset: float or refnx.analysis.Parameter, optional
        Compensates for uncertainties in the angle at which the measurement is
        performed. A positive/negative `q_offset` corresponds to a situation
        where the measured q values (incident angle) may have been under/over
        estimated, and has the effect of shifting the calculated model to
        lower/higher effective q values.
    """

    def __init__(
        self,
        structures,
        scales=None,
        bkg=1e-7,
        name="",
        dq=5.0,
        threads=-1,
        quad_order=17,
        dq_type="pointwise",
        q_offset=0.0,
    ):
        self.name = name
        self._parameters = None
        self.threads = threads
        self.quad_order = quad_order

        # all reflectometry models need scale factors and a background.
        # By default each scale is set to 1 / len(structures).
        pscales = Parameters(name="scale factors")

        if scales is not None and len(structures) == len(scales):
            tscales = scales
        elif scales is not None and len(structures) != len(scales):
            raise ValueError(
                "You need to supply a scale factor for each structure"
            )
        else:
            tscales = [1 / len(structures)] * len(structures)

        for scale in tscales:
            p_scale = possibly_create_parameter(scale, name="scale")
            pscales.append(p_scale)

        self._scales = pscales
        self._bkg = possibly_create_parameter(bkg, name="bkg")

        # we can optimize the resolution (but this is always overridden by
        # x_err if supplied, so there is possibly no dependence on it).
        self._dq = possibly_create_parameter(dq, name="dq - resolution")
        self.dq_type = dq_type

        self._q_offset = possibly_create_parameter(q_offset, name="q_offset")

        self._structures = structures

    def __repr__(self):
        s = (
            f"MixedReflectModel({self._structures!r},"
            f" scales={self._scales!r}, bkg={self._bkg!r},"
            f" name={self.name!r}, dq={self._dq!r},"
            f" threads={self.threads!r}, quad_order={self.quad_order!r},"
            f" dq_type={self.dq_type!r}, q_offset={self.q_offset!r})"
        )
        return s

    def __call__(self, x, p=None, x_err=None):
        r"""
        Calculate the generative model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray
            Calculated reflectivity

        """
        return self.model(x, p=p, x_err=x_err)

    @property
    def dq(self):
        r"""
        :class:`refnx.analysis.Parameter`

            - `dq.value == 0`
               no resolution smearing is employed.
            - `dq.value > 0`
               a constant dQ/Q resolution smearing is employed.  For 5%
               resolution smearing supply 5. However, if `x_err` is supplied to
               the `model` method, then that overrides any setting reported
               here.

        """
        return self._dq

    @dq.setter
    def dq(self, value):
        self._dq.value = value

    @property
    def q_offset(self):
        r"""
        :class:`refnx.analysis.Parameter` - compensates for any angular
        misalignment during an experiment.

        """
        return self._q_offset

    @q_offset.setter
    def q_offset(self, value):
        self._q_offset.value = value

    @property
    def scales(self):
        r"""
        :class:`refnx.analysis.Parameters` - the reflectivities calculated
        from each of the structures are multiplied by these values to account
        for patchiness.
        """
        return self._scales

    @property
    def bkg(self):
        r"""
        :class:`refnx.analysis.Parameter` - linear background added to all
        model values.

        """
        return self._bkg

    @bkg.setter
    def bkg(self, value):
        self._bkg.value = value

    def model(self, x, p=None, x_err=None):
        r"""
        Calculate the reflectivity of this model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray

        """
        if p is not None:
            self.parameters.pvals = np.array(p)
        if x_err is None or self.dq_type == "constant":
            # fallback to what this object was constructed with
            x_err = float(self.dq)

        scales = np.array(self.scales)

        y = np.zeros_like(x)

        for scale, structure in zip(scales, self.structures):
            y += reflectivity(
                x + self.q_offset.value,
                structure.slabs()[..., :4],
                scale=scale,
                dq=x_err,
                threads=self.threads,
                quad_order=self.quad_order,
            )

        return y + self.bkg.value

    def logp(self):
        r"""
        Additional log-probability terms for the reflectivity model. Do not
        include log-probability terms for model parameters, these are
        automatically calculated elsewhere.

        Returns
        -------
        logp : float
            log-probability of structure.

        """
        logp = 0
        for structure in self._structures:
            logp += structure.logp()

        return logp

    @property
    def structures(self):
        r"""
        list of :class:`refnx.reflect.Structure` that describe the patchiness
        of the surface.

        """
        return self._structures

    @property
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters` - parameters associated with this
        model.

        """
        p = Parameters(name="instrument parameters")
        p.extend([self.scales, self.bkg, self.dq, self.q_offset])

        self._parameters = Parameters(name=self.name)
        self._parameters.append([p])
        self._parameters.extend(
            [structure.parameters for structure in self._structures]
        )
        return self._parameters
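
A minimal usage sketch for the model above, assuming the standard refnx.reflect imports; the layer values, the 40/60 weighting and the q grid are illustrative, not part of the original example.

import numpy as np
from refnx.reflect import SLD, MixedReflectModel

air, sio2, si = SLD(0.0), SLD(3.47), SLD(2.07)
s1 = air(0, 0) | sio2(15, 3) | si(0, 3)    # thin oxide patch
s2 = air(0, 0) | sio2(40, 3) | si(0, 3)    # thick oxide patch

q = np.linspace(0.008, 0.3, 201)
model = MixedReflectModel((s1, s2), scales=(0.4, 0.6), bkg=1e-7, dq=5.0)
refl = model(q)          # equivalent to model.model(q)
print(model.parameters)  # scales, bkg, dq, q_offset, plus structure params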
Example #16
def ReadNistData(dataset, start="start2"):
    """
    NIST STRD data is in a simple, fixed format with line numbers being
    significant!
    """
    with open(os.path.join(NIST_DIR, "%s.dat" % dataset), "r") as finp:
        lines = [line[:-1] for line in finp.readlines()]

    model_lines = lines[30:39]
    param_lines = lines[40:58]
    data_lines = lines[60:]

    words = model_lines[1].strip().split()
    nparams = int(words[0])

    start1 = np.zeros(nparams)
    start2 = np.zeros(nparams)
    certval = np.zeros(nparams)
    certerr = np.zeros(nparams)

    for i, text in enumerate(param_lines[:nparams]):
        [s1, s2, val, err] = [float(x) for x in text.split("=")[1].split()]
        start1[i] = s1
        start2[i] = s2
        certval[i] = val
        certerr[i] = err

    for t in param_lines[nparams:]:
        t = t.strip()
        if ":" not in t:
            continue
        val = float(t.split(":")[1])
        if t.startswith("Residual Sum of Squares"):
            sum_squares = val
        elif t.startswith("Residual Standard Deviation"):
            std_dev = val
        elif t.startswith("Degrees of Freedom"):
            nfree = int(val)
        elif t.startswith("Number of Observations"):
            ndata = int(val)

    y, x = [], []
    for d in data_lines:
        vals = [float(i) for i in d.strip().split()]
        y.append(vals[0])
        if len(vals) > 2:
            x.append(vals[1:])
        else:
            x.append(vals[1])

    y = array(y)
    x = array(x)

    params = Parameters()
    for i in range(nparams):
        pname = "p%i" % (i + 1)
        if start == "start2":
            pval = start2[i]
        elif start == "start1":
            pval = start1[i]
        p = Parameter(pval, name=pname, vary=True)
        params.append(p)

    out = {
        "y": y,
        "x": x,
        "nparams": nparams,
        "ndata": ndata,
        "nfree": nfree,
        "start": params,
        "sum_squares": sum_squares,
        "std_dev": std_dev,
        "cert_values": certval,
        "cert_stderr": certerr,
    }

    return out
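
A hedged usage sketch: it assumes NIST_DIR points at the NIST StRD .dat files and uses 'Gauss1', one of the standard dataset names.

nist = ReadNistData('Gauss1', start='start1')
x, y = nist['x'], nist['y']
params = nist['start']            # refnx Parameters p1..pN at start values
print(nist['nparams'], nist['ndata'], nist['nfree'])
print(nist['cert_values'], nist['cert_stderr'])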
Example #17
File: NISTModels.py Project: llimeht/refnx
def ReadNistData(dataset, start='start2'):
    """
    NIST STRD data is in a simple, fixed format with line numbers being
    significant!
    """
    with open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r') as finp:
        lines = [l[:-1] for l in finp.readlines()]

    model_lines = lines[30:39]
    param_lines = lines[40:58]
    data_lines = lines[60:]

    words = model_lines[1].strip().split()
    nparams = int(words[0])

    start1 = np.zeros(nparams)
    start2 = np.zeros(nparams)
    certval = np.zeros(nparams)
    certerr = np.zeros(nparams)

    for i, text in enumerate(param_lines[:nparams]):
        [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
        start1[i] = s1
        start2[i] = s2
        certval[i] = val
        certerr[i] = err

    for t in param_lines[nparams:]:
        t = t.strip()
        if ':' not in t:
            continue
        val = float(t.split(':')[1])
        if t.startswith('Residual Sum of Squares'):
            sum_squares = val
        elif t.startswith('Residual Standard Deviation'):
            std_dev = val
        elif t.startswith('Degrees of Freedom'):
            nfree = int(val)
        elif t.startswith('Number of Observations'):
            ndata = int(val)

    y, x = [], []
    for d in data_lines:
        vals = [float(i) for i in d.strip().split()]
        y.append(vals[0])
        if len(vals) > 2:
            x.append(vals[1:])
        else:
            x.append(vals[1])

    y = array(y)
    x = array(x)

    params = Parameters()
    for i in range(nparams):
        pname = 'p%i' % (i + 1)
        if start == 'start2':
            pval = start2[i]
        elif start == 'start1':
            pval = start1[i]
        p = Parameter(pval, name=pname, vary=True)
        params.append(p)

    out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
           'nfree': nfree, 'start': params, 'sum_squares': sum_squares,
           'std_dev': std_dev, 'cert_values': certval,
           'cert_stderr': certerr}

    return out
Example #18
class TestParameters(object):
    def setup_method(self):
        self.a = Parameter(1, name='a')
        self.b = Parameter(2, name='b')
        self.m = Parameters()
        self.m.append(self.a)
        self.m.append(self.b)

    def test_retrieve_by_name(self):
        p = self.m['a']
        assert_(p is self.a)

        # or by index
        p = self.m[0]
        assert_(p is self.a)

    def test_repr(self):
        p = Parameter(value=5, vary=False, name='test')
        g = Parameters(name='name')
        f = Parameters()
        f.append(p)
        f.append(g)

        q = eval(repr(f))
        assert (q.name is None)
        assert_equal(q[0].value, 5)
        assert (q[0].vary is False)
        assert (isinstance(q[1], Parameters))

    def test_set_by_name(self):
        c = Parameter(3.)
        self.m['a'] = c
        assert_(self.m[0] is c)

        # can't set an entry by name, if there isn't an existing name in this
        # Parameters instance.
        from pytest import raises
        with raises(ValueError):
            self.m['abc'] = c

    def test_parameters(self):
        # we've added two parameters
        self.a.vary = True
        self.b.vary = True

        assert_equal(len(self.m.flattened()), 2)

        # the two entries should just be the objects
        assert_(self.m.varying_parameters()[0] is self.a)
        assert_(self.m.varying_parameters()[1] is self.b)

    def test_varying_parameters(self):
        # even though we've added a twice we should still only see 2
        # varying parameters
        self.a.vary = True
        self.b.vary = True
        p = self.a | self.b | self.a
        assert_equal(len(p.varying_parameters()), 2)

    def test_pickle_parameters(self):
        # need to check that Parameters can be pickled/unpickle
        pkl = pickle.dumps(self.m)
        pickle.loads(pkl)

    def test_or(self):
        # concatenation of Parameters
        # Parameters with Parameter
        c = self.m | self.b
        assert_equal(len(c), 3)
        assert_equal(len(c.flattened()), 3)
        assert_(c.flattened()[1] is self.b)
        assert_(c.flattened()[2] is self.b)

        # Parameters with Parameters
        c = Parameters(name='c')
        d = c | self.m
        assert_(d.name == 'c')

    def test_ior(self):
        # concatenation of Parameters
        # Parameters with Parameter
        c = Parameters(name='c')
        c |= self.b
        assert_equal(len(c), 1)
        assert_equal(len(c.flattened()), 1)
        assert_(c.flattened()[0] is self.b)

        # Parameters with Parameters
        c = Parameters(name='c')
        c |= self.m
        assert_(c.name == 'c')
        assert_equal(len(c), 1)
        assert_equal(len(c.flattened()), 2)
        assert_(c.flattened()[1] is self.b)
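
The concatenation semantics exercised by test_or/test_ior, condensed into a stand-alone sketch (names and values are illustrative):

from refnx.analysis import Parameter, Parameters

a = Parameter(1, name='a')
b = Parameter(2, name='b')

p = a | b                       # Parameter | Parameter -> Parameters
p |= Parameters(name='extra')   # in-place concatenation appends the object

print(len(p))              # 3 entries: a, b and the nested Parameters
print(len(p.flattened()))  # 2 Parameter leaves: a and b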
Example #19
class MetaModel(BaseModel):
    """
    Takes a number of models with scale factors and combines them into a
    single weighted model.
    """

    def __init__(self, models, scales=None, add_params=None, bkg=1e-7,
                 name='', dq=5, threads=-1, quad_order=17):
        # pass the constructor arguments through to the base class rather
        # than hard-coding its defaults
        super().__init__(bkg=bkg, name=name, dq=dq, threads=threads,
                         quad_order=quad_order)

        self.models = models

        if scales is not None and len(models) == len(scales):
            tscales = scales
        elif scales is not None and len(models) != len(scales):
            raise ValueError("You need to supply scale factor for each"
                             " structure")
        else:
            tscales = [1 / len(models)] * len(models)

        pscales = []
        for scale_num, scale in enumerate(tscales):
            p_scale = possibly_create_parameter(scale,
                                                name='scale %d' % scale_num)
            pscales.append(p_scale)

        self._scales = pscales

        # always create the attribute; otherwise the `parameters` property
        # raises AttributeError when no additional parameters are supplied
        self.additional_params = []
        if add_params is not None:
            for param in add_params:
                self.additional_params.append(param)

    def __call__(self, x, p=None, x_err=None):

        r"""
        Calculate the generative model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray
            Calculated reflectivity

        """
        return self.model(x, p=p, x_err=x_err)

    def model(self, x, p=None, x_err=None):
        r"""
        Calculate the reflectivity of this model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray

        """
        meta_model = np.zeros_like(x)
        for model, scale in zip(self.models, self._scales):
            model.bkg.setp(0)
            meta_model += model(x, p, x_err) * scale.value

        return meta_model + self.bkg.value

    def logp(self):
        r"""
        Additional log-probability terms for the reflectivity model. Do not
        include log-probability terms for model parameters, these are
        automatically calculated elsewhere.

        Returns
        -------
        logp : float
            log-probability of structure.
        """

        logp = 0
        for model in self.models:
            logp += model.logp()

        return logp

    @property
    def scales(self):
        r"""
        :class:`refnx.analysis.Parameter` - the reflectivity from each of the
        structures are multiplied by these values to account for patchiness.
        """
        return self._scales

    @property
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters` - parameters associated with this
        model.

        """
        p = Parameters(name='meta instrument parameters')
        p.extend([self.bkg, self.dq])
        p.extend(self.additional_params)
        self._parameters = Parameters(name=self.name)

        for model, scale in zip(self.models, self._scales):
            p.extend([scale])
            p.extend(model.parameters.flattened())

        self._parameters.append(p)

        return self._parameters
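
A hypothetical sketch of driving MetaModel; the two sub-models and the 60/40 weighting are assumptions, and BaseModel is taken to provide the bkg/dq machinery used above.

import numpy as np
from refnx.reflect import SLD, ReflectModel

air, sio2, si = SLD(0.0), SLD(3.47), SLD(2.07)
m1 = ReflectModel(air(0, 0) | sio2(15, 3) | si(0, 3))
m2 = ReflectModel(air(0, 0) | sio2(40, 3) | si(0, 3))

meta = MetaModel((m1, m2), scales=(0.6, 0.4))
q = np.linspace(0.01, 0.25, 101)
r = meta(q)   # 0.6 * m1(q) + 0.4 * m2(q) + meta.bkg, sub-model bkgs zeroed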
Example #20
class DistributionModel(object):
    """
    Model in which one parameter of a chosen structural component is spread
    over a discrete distribution of structures, whose scaled reflectivities
    are then summed.

    Parameters
    ----------
    structure : refnx structure object
        The interfacial structure.
    loc_in_struct : int
        The index of the structural component that you want to implement as
        a distribution.
    param_name : str
        the name of the parameter of the distribution component that you
        want to vary. (Currently only thickness and adsorbed amount are
        implemented)
    pdf : function
        if None, will default to a normal distribution
    pdf_kwargs : dict
        Dictionary with kwargs for the pdf. This will be used to
        parameterise the pdf.
    num_structs : int
        number of discrete points that will be generated along the pdf
    scale : float or refnx.analysis.Parameter, optional
        NOT IMPLEMENTED
    bkg : float or refnx.analysis.Parameter, optional
        Q-independent constant background added to all model values. This is
        turned into a Parameter during the construction of this object.
    name : str, optional
        Name of the Model
    dq : float or refnx.analysis.Parameter, optional

        - `dq == 0` then no resolution smearing is employed.
        - `dq` is a float or refnx.analysis.Parameter
           a constant dQ/Q resolution smearing is employed.  For 5% resolution
           smearing supply 5.

        However, if `x_err` is supplied to the `model` method, then that
        overrides any setting given here. This value is turned into
        a Parameter during the construction of this object.
    threads: int, optional
        Specifies the number of threads for parallel calculation. This
        option is only applicable if you are using the ``_creflect``
        module. The option is ignored if using the pure python calculator,
        ``_reflect``. If `threads == 0` then all available processors are
        used.
    quad_order: int, optional
        the order of the Gaussian quadrature polynomial for doing the
        resolution smearing. default = 17. Don't choose less than 13. If
        quad_order == 'ultimate' then adaptive quadrature is used. Adaptive
        quadrature will always work, but takes a _long_ time (2 or 3 orders
        of magnitude longer). Fixed quadrature will always take a lot less
        time. BUT it won't necessarily work across all samples. For
        example, 13 points may be fine for a thin layer, but will be
        atrocious at describing a multilayer with Bragg peaks.

    """
    def __init__(self, structure, loc_in_struct, param_name='Thickness',
                 pdf=None, pdf_kwargs=None, num_structs=11, scale=1, bkg=1e-7,
                 name='', dq=5, threads=-1, quad_order=17):

        self.name = name
        self._parameters = None
        self.threads = threads
        self.quad_order = quad_order
        self.master_structure = structure
        self.loc_in_struct = loc_in_struct
        self.param_name = param_name.lower()
        self.num_structs = num_structs
        self._scale = scale

        if pdf is None:
            self.pdf = norm.pdf

            if pdf_kwargs is not None:
                # previously this branch only warned, leaving pdf_params
                # unset; use the supplied kwargs for the default normal pdf
                print('Warning: You have provided pdf_kwargs without'
                      ' providing a pdf')
            else:
                pdf_kwargs = {'loc': 10, 'scale': 1}
        else:
            assert pdf_kwargs is not None, 'You must supply pdf_kwargs'
            self.pdf = pdf

        self.pdf_params = []
        for kw in pdf_kwargs:
            self.pdf_params.append(
                possibly_create_parameter(pdf_kwargs[kw], name=kw))

        self._structures = self.create_structures()
        self._scales = np.ones(self.num_structs)/self.num_structs
        self._bkg = possibly_create_parameter(bkg, name='bkg')

        # we can optimize the resolution (but this is always overridden by
        # x_err if supplied). There is therefore possibly no dependence on it.
        self._dq = possibly_create_parameter(dq, name='dq - resolution')

        self.generate_thicknesses()

    def __call__(self, x, p=None, x_err=None):
        r"""
        Calculate the generative model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray
            Calculated reflectivity

        """
        return self.model(x, p=p, x_err=x_err)

    def __repr__(self):
        return ("DistributionModel({master_structure!r}, name={name!r},"
                " scale={_scale!r}, bkg={_bkg!r},"
                " dq={_dq!r}, threads={threads},"
                " quad_order={quad_order})".format(**self.__dict__))

    def create_structures(self):
        structures = []
        self.distribution_params = []
        COI = self.master_structure[self.loc_in_struct]

        for i in range(self.num_structs):
            new_COI = copy(COI)

            if self.param_name == 'thickness':
                new_COI.thick = Parameter(name='%d - Thick' % i,
                                          value=new_COI.thick.value,
                                          vary=False)
                self.distribution_params.append(new_COI.thick)
            elif self.param_name == 'adsorbed amount':
                new_COI.adsorbed_amount = Parameter(
                    name='%d - Ads. amnt.' % i,
                    value=new_COI.adsorbed_amount.value,
                    vary=False)
                self.distribution_params.append(new_COI.adsorbed_amount)
            else:
                raise ValueError("param_name %r not recognised"
                                 % self.param_name)

            struct = self.master_structure[0]

            for component in self.master_structure[1:]:
                if component is not COI:
                    struct = struct | component
                else:
                    struct = struct | new_COI

            struct.solvent = self.master_structure.solvent
            structures.append(struct)

        return structures

    @property
    def pdf_kwargs(self):
        temp = {}
        for param in self.pdf_params:
            temp[param.name] = param.value
        return temp

    def generate_thicknesses(self):
        d = np.linspace(0, 5000, 10000)
        pdf = self.pdf(d, **self.pdf_kwargs)
        effective_component = d[pdf > 0.01*pdf.max()]
        effective_pdf = pdf[pdf > 0.01*pdf.max()]
        effective_range = [effective_component.min(), effective_component.max()]

        pvals = np.linspace(*effective_range, num=self.num_structs)

        for pval, param in zip(pvals, self.distribution_params):
            param.value = pval

        scales = np.interp(pvals, effective_component, effective_pdf)
        self._scales = scales/np.sum(scales)

    @property
    def dq(self):
        r"""
        :class:`refnx.analysis.Parameter`

            - `dq.value == 0`
               no resolution smearing is employed.
            - `dq.value > 0`
               a constant dQ/Q resolution smearing is employed.  For 5%
               resolution smearing supply 5. However, if `x_err` is supplied to
               the `model` method, then that overrides any setting reported
               here.
        """
        return self._dq

    @dq.setter
    def dq(self, value):
        self._dq.value = value

    @property
    def scales(self):
        r"""
        :class:`refnx.analysis.Parameter` - the reflectivity from each of the
        structures are multiplied by these values to account for patchiness.
        """
        return self._scales

    @property
    def bkg(self):
        r"""
        :class:`refnx.analysis.Parameter` - linear background added to all
        model values.

        """
        return self._bkg

    @bkg.setter
    def bkg(self, value):
        self._bkg.value = value

    def model(self, x, p=None, x_err=None):
        r"""
        Calculate the reflectivity of this model

        Parameters
        ----------
        x : float or np.ndarray
            q values for the calculation.
        p : refnx.analysis.Parameters, optional
            parameters required to calculate the model
        x_err : np.ndarray
            dq resolution smearing values for the dataset being considered.

        Returns
        -------
        reflectivity : np.ndarray

        """
        self.generate_thicknesses()

        if p is not None:
            self.parameters.pvals = np.array(p)
        if x_err is None:
            # fallback to what this object was constructed with
            x_err = float(self.dq)

        scales = np.array(self.scales)

        y = np.zeros_like(x)

        for scale, structure in zip(scales, self.structures):
            y += reflectivity(x,
                              structure.slabs()[..., :4],
                              scale=scale,
                              dq=x_err,
                              threads=self.threads,
                              quad_order=self.quad_order)

        return self._scale*y + self.bkg.value

    def logp(self):
        r"""
        Additional log-probability terms for the reflectivity model. Do not
        include log-probability terms for model parameters, these are
        automatically calculated elsewhere.

        Returns
        -------
        logp : float
            log-probability of structure.

        """
        logp = 0
        for structure in self._structures:
            logp += structure.logp()

        return logp

    @property
    def structures(self):
        r"""
        list of :class:`refnx.reflect.Structure` that describe the patchiness
        of the surface.

        """
        return self._structures

    @property
    def parameters(self):
        r"""
        :class:`refnx.analysis.Parameters` - parameters associated with this
        model.

        """
        p = Parameters(name='instrument parameters')
        p.extend([self.pdf_params, self.bkg, self.dq])

        self._parameters = Parameters(name=self.name)
        self._parameters.append([p])
        self._parameters.extend([structure.parameters for structure
                                 in self._structures])
        return self._parameters
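
A hypothetical sketch of the class in use: a normal distribution of thicknesses over the oxide layer of a simple structure. The component index, pdf parameter values and q grid are all assumptions.

import numpy as np
from refnx.reflect import SLD

air, sio2, si = SLD(0.0), SLD(3.47), SLD(2.07)
structure = air(0, 0) | sio2(250, 3) | si(0, 3)

# spread the thickness of the sio2 layer (component index 1) over a pdf
dist = DistributionModel(structure, loc_in_struct=1,
                         param_name='Thickness', num_structs=11)
dist.pdf_params[0].value = 250   # 'loc' - centre of the normal pdf
dist.pdf_params[1].value = 30    # 'scale' - width of the normal pdf

q = np.linspace(0.01, 0.3, 201)
r = dist(q)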
Example #21
    def test_covar(self):
        # checks objective.covar against optimize.least_squares covariance.
        path = os.path.dirname(os.path.abspath(__file__))

        theoretical = np.loadtxt(os.path.join(path, 'gauss_data.txt'))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        p0 = np.array([0.1, 20., 0.1, 0.1])
        names = ['bkg', 'A', 'x0', 'width']
        bounds = [(-1, 1), (0, 30), (-5., 5.), (0.001, 2)]

        params = Parameters(name="gauss_params")
        for p, name, bound in zip(p0, names, bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            params.append(param)

        model = Model(params, fitfunc=gauss)
        data = Data1D((xvals, yvals, evals))
        objective = Objective(model, data)

        # first calculate least_squares jac/hess/covariance matrices
        res = least_squares(objective.residuals,
                            np.array(params),
                            jac='3-point')

        hess_least_squares = np.matmul(res.jac.T, res.jac)
        covar_least_squares = np.linalg.inv(hess_least_squares)

        # now calculate corresponding matrices by hand, to see if the approach
        # concurs with least_squares
        objective.setp(res.x)
        _pvals = np.array(res.x)

        def residuals_scaler(vals):
            return np.squeeze(objective.residuals(_pvals * vals))

        jac = approx_derivative(residuals_scaler, np.ones_like(_pvals))
        hess = np.matmul(jac.T, jac)
        covar = np.linalg.inv(hess)

        covar = covar * np.atleast_2d(_pvals) * np.atleast_2d(_pvals).T

        assert_allclose(covar, covar_least_squares)

        # check that objective.covar corresponds to the least_squares
        # covariance matrix
        objective.setp(res.x)
        _pvals = np.array(res.x)
        covar_objective = objective.covar()
        assert_allclose(covar_objective, covar_least_squares)

        # now see what happens with a parameter that has no effect on residuals
        param = Parameter(1.234, name='dummy')
        param.vary = True
        params.append(param)

        from pytest import raises
        with raises(LinAlgError):
            objective.covar()
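
Restating the identity the test verifies in bare numpy: at the least-squares solution, with residual Jacobian J, the parameter covariance is approximated by inv(JᵀJ). The Jacobian below is a random stand-in, not fit output.

import numpy as np

rng = np.random.default_rng(1)
J = rng.normal(size=(50, 4))   # stand-in Jacobian (50 points, 4 params)
hess = J.T @ J                 # Gauss-Newton approximation to the Hessian
covar = np.linalg.inv(hess)    # approximate parameter covariance matrix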
Example #22
class TestFitterGauss(object):
    # Test CurveFitter with a noisy gaussian, weighted and unweighted, to see
    # if the parameters and uncertainties come out correct

    @pytest.fixture(autouse=True)
    def setup_method(self, tmpdir):
        self.path = os.path.dirname(os.path.abspath(__file__))
        self.tmpdir = tmpdir.strpath

        theoretical = np.loadtxt(os.path.join(self.path, "gauss_data.txt"))
        xvals, yvals, evals = np.hsplit(theoretical, 3)
        xvals = xvals.flatten()
        yvals = yvals.flatten()
        evals = evals.flatten()

        # these best weighted values and uncertainties obtained with Igor
        self.best_weighted = [-0.00246095, 19.5299, -8.28446e-2, 1.24692]

        self.best_weighted_errors = [
            0.0220313708486,
            1.12879436221,
            0.0447659158681,
            0.0412022938883,
        ]

        self.best_weighted_chisqr = 77.6040960351

        self.best_unweighted = [
            -0.10584111872702096,
            19.240347049328989,
            0.0092623066070940396,
            1.501362314145845,
        ]

        self.best_unweighted_errors = [
            0.34246565477,
            0.689820935208,
            0.0411243173041,
            0.0693429375282,
        ]

        self.best_unweighted_chisqr = 497.102084956

        self.p0 = np.array([0.1, 20.0, 0.1, 0.1])
        self.names = ["bkg", "A", "x0", "width"]
        self.bounds = [(-1, 1), (0, 30), (-5.0, 5.0), (0.001, 2)]

        self.params = Parameters(name="gauss_params")
        for p, name, bound in zip(self.p0, self.names, self.bounds):
            param = Parameter(p, name=name)
            param.range(*bound)
            param.vary = True
            self.params.append(param)

        self.model = Model(self.params, fitfunc=gauss)
        self.data = Data1D((xvals, yvals, evals))
        self.objective = Objective(self.model, self.data)
        return 0

    def test_pickle(self):
        # tests if a CurveFitter can be pickled/unpickled.
        f = CurveFitter(self.objective)
        pkl = pickle.dumps(f)
        g = pickle.loads(pkl)
        g._check_vars_unchanged()

    def test_best_weighted(self):
        assert_equal(len(self.objective.varying_parameters()), 4)
        self.objective.setp(self.p0)

        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit("least_squares", jac="3-point")

        output = res.x
        assert_almost_equal(output, self.best_weighted, 3)
        assert_almost_equal(self.objective.chisqr(), self.best_weighted_chisqr,
                            5)

        # compare the residuals
        res = (self.data.y - self.model(self.data.x)) / self.data.y_err
        assert_equal(self.objective.residuals(), res)

        # compare objective.covar to the best_weighted_errors
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.005)

        # we're also going to try the checkpointing here.
        checkpoint = os.path.join(self.tmpdir, "checkpoint.txt")

        # compare samples to best_weighted_errors
        np.random.seed(1)
        f.sample(steps=201, random_state=1, verbose=False, f=checkpoint)
        process_chain(self.objective, f.chain, nburn=50, nthin=10)
        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)

        # test that the checkpoint worked
        check_array = np.loadtxt(checkpoint)
        check_array = check_array.reshape(201, f._nwalkers, f.nvary)
        assert_allclose(check_array, f.chain)

        # test loading the checkpoint
        chain = load_chain(checkpoint)
        assert_allclose(chain, f.chain)

        f.initialise("jitter")
        f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
        assert_equal(f.chain.shape[0], 2)

        # we should be able to produce 2 * 100 steps from the generator
        g = self.objective.pgen(ngen=20000000000)
        s = [i for i, a in enumerate(g)]
        assert_equal(np.max(s), 200 - 1)
        g = self.objective.pgen(ngen=200)
        pvec = next(g)
        assert_equal(pvec.size, len(self.objective.parameters.flattened()))

        # check that all the parameters are returned via pgen, not only those
        # being varied.
        self.params[0].vary = False
        f = CurveFitter(self.objective, nwalkers=100)
        f.initialise("jitter")
        f.sample(steps=2, nthin=4, f=checkpoint, verbose=False)
        g = self.objective.pgen(ngen=100)
        pvec = next(g)
        assert_equal(pvec.size, len(self.objective.parameters.flattened()))

        # the following test won't work because of emcee/gh226.
        # chain = load_chain(checkpoint)
        # assert_(chain.shape == f.chain.shape)
        # assert_allclose(chain, f.chain)

        # try reproducing best fit with parallel tempering
        self.params[0].vary = True
        f = CurveFitter(self.objective, nwalkers=100, ntemps=10)
        f.fit("differential_evolution", seed=1)

        f.sample(steps=201, random_state=1, verbose=False)
        process_chain(self.objective, f.chain, nburn=50, nthin=15)
        print(self.params[0].chain.shape, self.params[0].chain)

        uncertainties = [param.stderr for param in self.params]
        assert_allclose(uncertainties, self.best_weighted_errors, rtol=0.07)

    def test_best_unweighted(self):
        self.objective.weighted = False
        f = CurveFitter(self.objective, nwalkers=100)
        res = f.fit()

        output = res.x
        assert_almost_equal(self.objective.chisqr(),
                            self.best_unweighted_chisqr)
        assert_almost_equal(output, self.best_unweighted, 5)

        # compare the residuals
        res = self.data.y - self.model(self.data.x)
        assert_equal(self.objective.residuals(), res)

        # compare objective._covar to the best_unweighted_errors
        uncertainties = np.array([param.stderr for param in self.params])
        assert_almost_equal(uncertainties, self.best_unweighted_errors, 3)

        # the samples won't compare to the covariance matrix...
        # f.sample(nsteps=150, nburn=20, nthin=30, random_state=1)
        # uncertainties = [param.stderr for param in self.params]
        # assert_allclose(uncertainties, self.best_unweighted_errors,
        #                 rtol=0.15)

    def test_all_minimisers(self):
        """test minimisers against the Gaussian fit"""
        f = CurveFitter(self.objective)

        methods = ["differential_evolution", "L-BFGS-B", "least_squares"]
        if hasattr(sciopt, "shgo"):
            methods.append("shgo")
        if hasattr(sciopt, "dual_annealing"):
            methods.append("dual_annealing")

        for method in methods:
            self.objective.setp(self.p0)
            res = f.fit(method=method)
            assert_almost_equal(res.x, self.best_weighted, 3)

        # smoke test to check that we can use nlpost
        self.objective.setp(self.p0)
        logp0 = self.objective.logp()

        # check that probabilities are calculated correctly
        assert_allclose(
            self.objective.logpost(),
            self.objective.logp() + self.objective.logl(),
        )
        assert_allclose(self.objective.nlpost(), -self.objective.logpost())
        assert_allclose(self.objective.nlpost(self.p0),
                        -self.objective.logpost(self.p0))

        # if the priors are all uniform then the only difference between
        # logpost and logl is a constant. A minimiser should converge on the
        # same answer. The following tests examine that.
        # The test works for dual_annealing, but not for differential
        # evolution, not sure why that is.
        self.objective.setp(self.p0)
        res1 = f.fit(method="dual_annealing", seed=1)
        assert_almost_equal(res1.x, self.best_weighted, 3)
        nll1 = self.objective.nll()
        nlpost1 = self.objective.nlpost()

        self.objective.setp(self.p0)
        res2 = f.fit(method="dual_annealing", target="nlpost", seed=1)
        assert_almost_equal(res2.x, self.best_weighted, 3)
        nll2 = self.objective.nll()
        nlpost2 = self.objective.nlpost()

        assert_allclose(nlpost1, nlpost2, atol=0.001)
        assert_allclose(nll1, nll2, atol=0.001)

        # these two priors are calculated for different parameter values
        # (before and after the fit) they should be the same because all
        # the parameters have uniform priors.
        assert_almost_equal(self.objective.logp(), logp0)

    def test_pymc3_sample(self):
        # test sampling with pymc3
        try:
            import pymc3 as pm
            from refnx.analysis import pymc3_model
        except (ModuleNotFoundError, ImportError, AttributeError):
            # can't run test if pymc3/theano not installed
            return

        with pymc3_model(self.objective):
            s = pm.NUTS()
            pm.sample(
                200,
                tune=100,
                step=s,
                discard_tuned_samples=True,
                compute_convergence_checks=False,
                random_seed=1,
            )
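
Condensed, the fit/sample workflow these tests exercise looks like the sketch below; `objective` stands in for an already-constructed refnx Objective.

from refnx.analysis import CurveFitter, process_chain

fitter = CurveFitter(objective, nwalkers=100)   # objective: assumed above
res = fitter.fit('least_squares')               # point estimate + stderr
fitter.sample(steps=200, random_state=1)        # MCMC around the best fit
process_chain(objective, fitter.chain, nburn=50, nthin=10)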
Example #23
class Spline(Component):
    """
    Freeform modelling of the real part of an SLD profile using spline
    interpolation.

    Parameters
    ----------
    extent : float or Parameter
        Total extent of spline region
    vs : Sequence of float/Parameter
        the real part of the SLD values of each of the knots.
    dz : Sequence of float/Parameter
        the lateral offset between successive knots.
    name : str
        Name of component
    interpolator : scipy.interpolate Univariate Interpolator, optional
        Which scipy.interpolate Univariate Interpolator to use.
    zgrad : bool, optional
        If true then extra control knots are placed outside this spline
        with the same SLD as the materials on the left and right. With a
        monotonic interpolator this guarantees that the gradient is zero
        at either end of the interval.
    microslab_max_thickness : float
        Maximum size of the microslabs approximating the spline.

    Notes
    -----
    This spline component only generates the real part of the SLD (thereby
    assuming that the imaginary part is negligible).
    The sequence dz are the lateral offsets of the knots normalised to a
    unit interval [0, 1]. The reason for using lateral offsets is
    so that the knots are monotonically increasing in location. When each
    dz offset is turned into a Parameter it is given bounds in [0, 1].
    Thus with an extent of 500, and dz = [0.1, 0.2, 0.2], the knots will be
    at [0, 50, 150, 250, 500]. Notice that there are two extra knots for
    the start and end of the interval (disregarding the `zgrad` control
    knots). If ``np.sum(dz) > 1``, then the knot spacings are normalised to
    1. e.g. dz of [0.1, 0.2, 0.9] would result in knots (in the normalised
    interval) of [0, 0.0833, 0.25, 1, 1].
    If `vs` is monotonic then the output spline will be monotonic. If `vs`
    is not monotonic then there may be regions of the spline larger or
    smaller than `left` or `right`.
    The slab representation of this component are approximated using a
    'microslab' representation of spline. The max thickness of each
    microslab is `microslab_max_thickness`.

    A Spline component should not be used more than once in a given Structure.
    """
    def __init__(self,
                 extent,
                 vs,
                 dz,
                 name='',
                 interpolator=Pchip,
                 zgrad=True,
                 microslab_max_thickness=1):
        super(Spline, self).__init__()
        self.name = name
        self.microslab_max_thickness = microslab_max_thickness

        self.extent = (possibly_create_parameter(extent,
                                                 name='%s - spline extent' %
                                                 name))

        self.dz = Parameters(name='dz - spline')
        for i, z in enumerate(dz):
            p = possibly_create_parameter(z,
                                          name='%s - spline dz[%d]' %
                                          (name, i))
            p.range(0, 1)
            self.dz.append(p)

        self.vs = Parameters(name='vs - spline')
        for i, v in enumerate(vs):
            p = possibly_create_parameter(v,
                                          name='%s - spline vs[%d]' %
                                          (name, i))
            self.vs.append(p)

        if len(self.vs) != len(self.dz):
            raise ValueError("dz and vs must have same number of entries")

        self.zgrad = zgrad
        self.interpolator = interpolator

        self.__cached_interpolator = {
            'zeds': np.array([]),
            'vs': np.array([]),
            'interp': None,
            'extent': -1
        }

    def __repr__(self):
        s = ("Spline({extent!r}, {vs!r}, {dz!r}, name={name!r}, zgrad={zgrad},"
             " microslab_max_thickness={microslab_max_thickness})")
        return s.format(**self.__dict__)

    def _interpolator(self, structure):
        dz = np.array(self.dz)
        zeds = np.cumsum(dz)

        # if dz's sum to more than 1, then normalise to unit interval.
        if len(zeds) and zeds[-1] > 1:
            # there may be no knots
            zeds /= zeds[-1]
            zeds = np.clip(zeds, 0, 1)

        # note - this means you shouldn't use the same Spline more than once
        # in a Structure, because only the first use will be detected.
        try:
            loc = structure.index(self)
            # figure out SLDs for the bracketing Components.
            # note the use of the modulus operator. This means that if the
            # Spline is at the end, then the right most Component will be
            # assumed to be the first Component. This is to aid the use of
            # Spline in a Stack.
            left_component = structure[loc - 1]
            right_component = structure[(loc + 1) % len(structure)]
        except ValueError:
            raise ValueError("Spline didn't appear to be part of a super"
                             " Structure")

        if (isinstance(left_component, Spline)
                or isinstance(right_component, Spline)):
            raise ValueError("Spline must be bracketed by Components that"
                             " aren't Splines.")

        vs = np.array(self.vs)

        left_sld = Structure.overall_sld(
            np.atleast_2d(left_component.slabs(structure)[-1]),
            structure.solvent)[..., 1]

        right_sld = Structure.overall_sld(
            np.atleast_2d(right_component.slabs(structure)[0]),
            structure.solvent)[..., 1]

        if self.zgrad:
            zeds = np.concatenate([[-1.1, 0 - EPS], zeds, [1 + EPS, 2.1]])
            vs = np.concatenate([left_sld, left_sld, vs, right_sld, right_sld])
        else:
            zeds = np.concatenate([[0 - EPS], zeds, [1 + EPS]])
            vs = np.concatenate([left_sld, vs, right_sld])

        # cache the interpolator
        cache_zeds = self.__cached_interpolator['zeds']
        cache_vs = self.__cached_interpolator['vs']
        cache_extent = self.__cached_interpolator['extent']

        # you don't need to recreate the interpolator
        if (np.array_equal(zeds, cache_zeds) and np.array_equal(vs, cache_vs)
                and np.equal(self.extent, cache_extent)):
            return self.__cached_interpolator['interp']
        else:
            self.__cached_interpolator['zeds'] = zeds
            self.__cached_interpolator['vs'] = vs
            self.__cached_interpolator['extent'] = float(self.extent)

        # TODO make vfp zero for z > self.extent
        interpolator = self.interpolator(zeds, vs)
        self.__cached_interpolator['interp'] = interpolator
        return interpolator

    def __call__(self, z, structure):
        """
        Calculates the spline value at z

        Parameters
        ----------
        z : float
            Distance along spline
        structure: refnx.reflect.Structure
            Structure hosting this Component

        Returns
        -------
        sld : float
            Real part of SLD
        """
        interpolator = self._interpolator(structure)
        vs = interpolator(z / float(self.extent))
        return vs

    @property
    def parameters(self):
        p = Parameters(name=self.name)
        p.extend([self.extent, self.dz, self.vs])
        return p

    def logp(self):
        return 0

    def slabs(self, structure=None):
        """
        Slab representation of the spline, as an array

        Parameters
        ----------
        structure : refnx.reflect.Structure
            The Structure hosting this Component
        """
        if structure is None:
            raise ValueError("Spline.slabs() requires a valid Structure")

        num_slabs = np.ceil(float(self.extent) / self.microslab_max_thickness)
        slab_thick = float(self.extent / num_slabs)
        slabs = np.zeros((int(num_slabs), 5))
        slabs[:, 0] = slab_thick

        # give last slab a minuscule roughness so it doesn't get contracted
        slabs[-1:, 3] = 0.5

        dist = np.cumsum(slabs[..., 0]) - 0.5 * slab_thick
        slabs[:, 1] = self(dist, structure)

        return slabs
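
A minimal sketch of placing a Spline inside a Structure; the SLD values, 100 Å extent and knot layout are illustrative.

from refnx.reflect import SLD, Spline

air = SLD(0.0, name='air')
si = SLD(2.07, name='Si')

# three interior knots spread across a 100 Å freeform region
spline = Spline(100, vs=[3.0, 4.0, 3.5], dz=[0.25, 0.25, 0.25],
                name='freeform', zgrad=True)

s = air(0, 0) | spline | si(0, 3)
slabs = s.slabs()   # microslab representation, including the spline region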