Example #1
    def logp(self, pvals=None):
        """
        Calculate the log-prior of the system

        Parameters
        ----------
        pvals : array-like or refnx.analysis.Parameters
            values for the varying or entire set of parameters

        Returns
        -------
        logp : float
            log-prior probability

        Notes
        -----
        The log-prior is calculated as:

        .. code-block:: python

            logp = np.sum([param.logp() for param
                           in self.varying_parameters()])

        """
        self.setp(pvals)

        logp = np.sum([
            param.logp() for param in f_unique(
                p for p in flatten(self.parameters) if p.vary)
        ])

        if not np.isfinite(logp):
            return -np.inf

        return logp
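
A minimal usage sketch (an addition, not part of the example): the log-prior reduces to summing the per-parameter log-priors of the varying parameters. It assumes the refnx.analysis Parameter/Parameters API behaves as shown in Examples #14 and #15.

# sketch only: bounds are uniform intervals, so each logp() is finite
import numpy as np
from refnx.analysis import Parameter, Parameters

thickness = Parameter(100.0, name="thickness", bounds=(50.0, 150.0), vary=True)
roughness = Parameter(4.0, name="roughness", bounds=(0.0, 10.0), vary=True)
params = Parameters([thickness, roughness])

# summing per-parameter log-priors by hand matches Parameters.logp()
manual = np.sum([p.logp() for p in params.flattened() if p.vary])
assert np.allclose(manual, params.logp())
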
Example #2
    def setp(self, pvals):
        """
        Set the parameters from pvals.

        Parameters
        ----------
        pvals : array-like or refnx.analysis.Parameters
            values for the varying or entire set of parameters

        """
        if pvals is None:
            return

        # set here rather than delegating to a Parameters
        # object, because it may not necessarily be a
        # Parameters object
        _varying_parameters = self.varying_parameters()
        if len(pvals) == len(_varying_parameters):
            for idx, param in enumerate(_varying_parameters):
                param.value = pvals[idx]
            return

        # values supplied are enough to specify all parameter values
        # even those that are repeated
        flattened_parameters = list(flatten(self.parameters))
        if len(pvals) == len(flattened_parameters):
            for idx, param in enumerate(flattened_parameters):
                param.value = pvals[idx]
            return

        raise ValueError('Incorrect number of values supplied, supply either'
                         ' the full number of parameters, or only the varying'
                         ' parameters.')
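
A hedged sketch of the two accepted input lengths, built only from the Parameters behaviour shown in Examples #9, #14 and #19: a repeated parameter makes the flattened count differ from the varying count.

from refnx.analysis import Parameter, Parameters

shared = Parameter(1.0, name="shared", vary=True)
other = Parameter(2.0, name="other", vary=True)
p = Parameters([shared, Parameters([shared, other])])

print(len(p.flattened()))            # 3 slots; `shared` appears twice
print(len(p.varying_parameters()))   # 2 unique varying parameters

# the pvals setter (Example #9) accepts either length
p.pvals = [1.5, 2.5]                 # varying parameters only
p.pvals = [1.5, 1.5, 2.5]            # every flattened slot
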
Example #3
    def test_spline_smoke(self):
        # smoke test to make sure Spline at least gives us something
        a = Spline(100, [2], [0.5], zgrad=False, microslab_max_thickness=1)

        s = self.left | a | self.right | self.solvent
        b = a.slabs(s)
        assert_equal(b[:, 2], 0)

        # microslabs are assessed in the middle of the slab
        assert_equal(b[0, 1], a(0.5 * b[0, 0], s))

        # with the ends turned off the profile should be a straight line
        assert_equal(a(50, s), 2.0)

        # construct a structure
        a = Spline(
            100,
            [2.0, 3.0, 4.0],
            [0.25] * 3,
            zgrad=False,
            microslab_max_thickness=1,
        )

        # s.solvent = None
        s = self.left | a | self.right | self.solvent
        # calculate an SLD profile
        s.sld_profile()
        # ask for the parameters
        for p in flatten(s.parameters):
            assert_(isinstance(p, Parameter))

        # s.solvent is not None
        s.solvent = self.solvent
        # calculate an SLD profile
        s.sld_profile()
Example #4
def _calculate_constraints(i, objective):
    # builds constraints strings for an objective which has a local variable
    # name of objective_{i}
    all_pars = list(flatten(objective.parameters))
    var_pars = objective.varying_parameters()

    non_var_pars = [p for p in all_pars if p not in var_pars]

    # now get parameters with constraints
    con_pars = [par for par in non_var_pars if par.constraint is not None]
    tab = "    "

    constrain_strings = [
        tab + "parameters = list(flatten("
        "objective_{}.parameters))".format(i)
    ]
    for con_par in con_pars:
        idx = all_pars.index(con_par)
        con_tree = constraint_tree(con_par.constraint)
        for j, v in enumerate(con_tree):
            if v in operators:
                con_tree[j] = operators[v]
            elif v in all_pars:
                con_tree[j] = "parameters[{}]".format(all_pars.index(v))
            else:
                con_tree[j] = repr(v)
        s = ", ".join(con_tree)
        constraint = "build_constraint_from_tree([" + s + "])"
        item = tab + "parameters[{}].constraint = {}".format(idx, constraint)

        constrain_strings.append(item)

    return constrain_strings
Example #5
    def _interfaces_get(self):
        repeats = round(abs(self.repeats.value))
        interfaces = list(flatten([i.interfaces for i in self.data]))

        if repeats > 1:
            interfaces = interfaces * repeats

        return interfaces
Example #6
    def test_structure_construction(self):
        # structures are constructed by or-ing slabs
        # test that the slab representation is correct
        assert_equal(
            self.s.slabs(),
            np.array([[0, 0, 0, 0, 0], [100, 3.47, 0, 5, 0],
                      [0, 6.36, 0, 4, 0]]))

        self.s[1] = SLD(3.47 + 1j, name='sio2')(100, 5)
        self.s[1].vfsolv.value = 0.9

        oldpars = len(list(flatten(self.s.parameters)))

        # slabs have solvent penetration
        self.s.solvent = SLD(5 + 1.2j)
        sld = 5 * 0.9 + 0.1 * 3.47
        sldi = 1 * 0.1 + 0.9 * 1.2
        assert_almost_equal(
            self.s.slabs(),
            np.array([[0, 0, 0, 0, 0], [100, sld, sldi, 5, 0.9],
                      [0, 6.36, 0, 4, 0]]))

        # when structure._solvent is not None, but an SLD object, then
        # its number of parameters should increase by 2.
        newpars = len(list(flatten(self.s.parameters)))
        assert_equal(newpars, oldpars + 2)

        # by default solvation is done by backing medium
        self.s.solvent = None
        sld = 6.36 * 0.9 + 0.1 * 3.47
        sldi = 1 * 0.1
        assert_almost_equal(
            self.s.slabs(),
            np.array([[0, 0, 0, 0, 0], [100, sld, sldi, 5, 0.9],
                      [0, 6.36, 0, 4, 0]]))

        # by default solvation is done by backing medium, except when structure
        # is reversed
        self.s.reverse_structure = True
        sld = 0 * 0.9 + 0.1 * 3.47
        sldi = 0 * 0.9 + 1 * 0.1
        assert_almost_equal(
            self.s.slabs(),
            np.array([[0, 6.36, 0, 0, 0], [100, sld, sldi, 4, 0.9],
                      [0, 0, 0, 5, 0]]))
Example #7
    def __repr__(self):
        s = list()
        s.append("{:_>80}".format(''))
        s.append("Parameters: {0: ^15}".format(repr(self.name)))

        for el in self._pprint():
            s.append(el)

        return '\n'.join(list(flatten(s)))
Example #8
 def names(self):
     """
     Returns
     -------
     names : list
         A list of all the names of all the :class:`Parameter` contained in
         this object.
     """
     return [param.name for param in flatten(self.data)]
Example #9
    def pvals(self, pvals):
        varying = [param for param in f_unique(flatten(self.data))
                   if param.vary]
        if np.size(pvals) == len(varying):
            for i, param in enumerate(varying):
                param.value = pvals[i]
            return

        flattened_parameters = list(flatten(self.data))

        if np.size(pvals) == len(flattened_parameters):
            for i, param in enumerate(flattened_parameters):
                param.value = pvals[i]
            return

        raise ValueError("You supplied the wrong number of values %d when "
                         "setting this Parameters.pvals attribute"
                         % np.size(pvals))
Example #10
 def constrained_parameters(self):
     """
     Returns
     -------
     constrained_parameters : list
         A list of unique :class:`Parameter` contained in this object that
         have constraints.
     """
     return [param for param in f_unique(flatten(self.data))
             if param.constraint is not None]
Example #11
    def nvary(self):
        """
        Returns
        -------
        nvary : int
            The number of :class:`Parameter` contained in this object that are
            allowed to vary.

        """
        return len([1 for param in f_unique(flatten(self.data)) if param.vary])
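
A quick sketch exercising the accessors from Examples #8, #10 and #11 together. The decorators were stripped during extraction; these are assumed to be properties here, as the pvals setter in Example #9 suggests.

from refnx.analysis import Parameter, Parameters

a = Parameter(1.0, name="a", vary=True)
b = Parameter(2.0, name="b")
b.constraint = 2 * a                  # b now depends on a

group = Parameters([a, b])
print(group.names)                    # ['a', 'b']
print(group.nvary)                    # 1; b is constrained, not varying
print(group.constrained_parameters)   # the constrained parameter b
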
Example #12
def constraint_tree(expr):
    """
    builds a mathematical tree of a constraint expression
    this can be fed into build_constraint_from_tree to
    reconstitute a constraint
    """
    if isinstance(expr, Parameter):
        return [expr]
    if isinstance(expr, Constant):
        return [expr]
    return list(flatten(_constraint_tree_helper(expr)))
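
A small round-trip sketch. It assumes constraint_tree and build_constraint_from_tree are importable from refnx.analysis.parameter (the module this function appears to live in) and that constraint expressions expose a .value.

from refnx.analysis import Parameter
from refnx.analysis.parameter import (constraint_tree,
                                      build_constraint_from_tree)

a = Parameter(2.0, name="a")
expr = 2 * a + 1                      # a constraint expression

tree = constraint_tree(expr)
rebuilt = build_constraint_from_tree(tree)
print(rebuilt.value)                  # 5.0, same value as the original
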
Example #13
 def dependencies(self):
     dep_list = []
     for _dep in self._deps:
         if isinstance(_dep, Parameter):
             if _dep.constraint is not None:
                 dep_list.append(_dep.dependencies())
             else:
                 dep_list.append(_dep)
         if isinstance(_dep, (_UnaryOp, _BinaryOp)):
             dep_list.append(_dep.dependencies())
     return list(flatten(dep_list))
Example #14
    def flattened(self, unique=False):
        """
        A list of all the :class:`Parameter` contained in this object,
        including those contained within :class:`Parameters` at any depth.

        Parameters
        ----------
        unique : bool
            The list will only contain unique objects.

        Returns
        -------
        params : list
            A list of :class:`Parameter` contained in this object.

        """
        if unique:
            return list(f_unique(flatten(self.data)))
        else:
            return list(flatten(self.data))
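
Sketch of the unique flag, following directly from the docstring: each occurrence of a repeated parameter is listed unless unique=True.

from refnx.analysis import Parameter, Parameters

p = Parameter(1.0, name="p")
group = Parameters([p, Parameters([p])])

print(len(group.flattened()))              # 2: both occurrences of p
print(len(group.flattened(unique=True)))   # 1: duplicates collapsed
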
Example #15
    def logp(self):
        """
        Calculates logp for all the parameters

        Returns
        -------
        logp : float
            Log probability for all the parameters
        """
        # logp for all the parameters
        return np.sum([param.logp() for param in f_unique(flatten(self.data))
                       if param.vary])
Example #16
    def _on_slab_varies_modified(self, change):
        d = self.param_widgets_link
        slab = self.slab

        for par in flatten(slab.parameters):
            if id(par) in d and change["owner"] in d[id(par)]:
                wids = d[id(par)]
                par.vary = wids[1].value
                break

        self.param_being_varied = change["owner"]
        self.view_changed = time.time()
Example #17
def plot_corner(objective, samples):
    labels = []

    for i in flatten(objective.parameters.varying_parameters()):
        labels.append(i.name)

    fig = corner.corner(samples,
                        labels=labels,
                        quantiles=[0.025, 0.5, 0.975],
                        show_titles=True,
                        title_kwargs={"fontsize": 12})

    return fig
Example #18
    def refresh(self):
        """
        Updates the widget values from the underlying `Slab` parameters.
        """
        d = self.param_widgets_link

        ids = {id(p): p for p in flatten(self.slab.parameters) if id(p) in d}
        for idx, par in ids.items():
            widgets = d[idx]
            widgets[0].value = par.value
            widgets[1].value = par.vary
            widgets[2].value = par.bounds.lb
            widgets[3].value = par.bounds.ub
Example #19
    def varying_parameters(self):
        """
        Unique list of varying parameters

        Returns
        -------
        p : list
            Unique list of varying parameters
        """
        p = [param for param in f_unique(flatten(self.data)) if param.vary]
        q = Parameters()
        q.data = p
        return q
Example #20
    def varying_parameters(self):
        """
        Returns
        -------
        varying_parameters : refnx.analysis.Parameters
            The varying Parameter objects allowed to vary during the fit.

        """
        # create and return a Parameters object because it has the
        # __array__ method, which allows one to quickly get numerical values.
        p = Parameters()
        p.data = list(f_unique(p for p in flatten(self.parameters) if p.vary))
        return p
Example #21
    def _on_slab_limits_modified(self, change):
        slab = self.slab
        d = self.param_widgets_link

        for par in flatten(slab.parameters):
            if id(par) in d and change["owner"] in d[id(par)]:
                wids = d[id(par)]
                loc = wids.index(change["owner"])
                if loc == 2:
                    par.bounds.lb = wids[loc].value
                    break
                elif loc == 3:
                    par.bounds.ub = wids[loc].value
                    break
                else:
                    return
Example #22
    def _on_slab_params_modified(self, change):
        d = self.param_widgets_link
        slab = self.slab

        for par in flatten(slab.parameters):
            if id(par) in d and change['owner'] in d[id(par)]:
                wids = d[id(par)]
                loc = wids.index(change['owner'])
                if loc == 0:
                    par.value = wids[0].value
                    break
                elif loc == 1:
                    par.vary = wids[1].value
                    break

        self.param_being_varied = change['owner']
        self.view_changed = time.time()
Example #23
    def __init__(self, data, model, parent=QtCore.QModelIndex(), flat=True):
        """
        Parameters
        ----------
        flat : bool
            If `flat is True`, then this superclass will flatten out all
            the parameters in a model and append them as child items.
        """
        super(ComponentNode, self).__init__(data, model, parent)

        if flat:
            for par in flatten(data.parameters):
                pn = ParNode(par, model, parent=self)
                self.appendChild(pn)
        else:
            for p in data.parameters:
                if isinstance(p, Parameters):
                    n = ParametersNode(p, model, self)
                elif isinstance(p, Parameter):
                    n = ParNode(p, model, self)
                else:
                    continue
                self.appendChild(n)
Example #24
    def test_materialsld(self):
        p = MaterialSLD("SiO2", density=2.2, name="silica")
        sldc = complex(p)
        assert_allclose(sldc.real, 3.4752690258246504)
        assert_allclose(sldc.imag, 1.0508799522721932e-05)
        assert p.probe == "neutron"

        # is X-ray SLD correct?
        p.wavelength = 1.54
        p.probe = "x-ray"
        sldc = complex(p)
        assert_allclose(sldc.real, 18.864796064009866)
        assert_allclose(sldc.imag, 0.2436013463223236)

        assert len(p.parameters) == 1
        assert p.formula == "SiO2"

        # the density value should change the SLD
        p.probe = "neutron"
        p.density.value = 4.4
        sldc = complex(p)
        assert_allclose(sldc.real, 3.4752690258246504 * 2)
        assert_allclose(sldc.imag, 1.0508799522721932e-05 * 2)

        # should be able to make a Slab from MaterialSLD
        slab = p(10, 3)
        assert isinstance(slab, Slab)
        slab = Slab(10, p, 3)
        assert isinstance(slab, Slab)

        # make a full structure and check that the reflectivity calc works
        air = SLD(0)
        sio2 = MaterialSLD("SiO2", density=2.2)
        si = MaterialSLD("Si", density=2.33)
        s = air | sio2(10, 3) | si(0, 3)
        s.reflectivity(np.linspace(0.005, 0.3, 100))

        p = s.parameters
        assert len(list(flatten(p))) == 5 + 4 + 4
Example #25
    def _micro_slabs(self, slice_size=0.5):
        """
        Creates a microslab representation of the Structure.

        Parameters
        ----------
        slice_size : float
            Thickness of each slab in the micro-slab representation

        Returns
        -------
        micro_slabs : np.ndarray
            The micro-slab representation of the model. See the
            `Structure.slabs` method for a description of the array.
        """
        # solvate the slabs from each component
        sl = [c.slabs(structure=self) for c in self.components]
        total_slabs = np.concatenate(sl)
        total_slabs[1:-1] = self.overall_sld(total_slabs[1:-1], self.solvent)

        total_slabs[:, 0] = np.fabs(total_slabs[:, 0])
        total_slabs[:, 3] = np.fabs(total_slabs[:, 3])

        # interfaces between all the slabs
        _interfaces = self.interfaces
        erf_interface = Erf()
        # the default Interface is None.
        # The Component.interfaces property may not have the same length as
        # the Component.slabs. Expand it so it matches the number of slabs,
        # otherwise the calculation of microslabs fails.
        for i, (_interface, _slabs) in enumerate(zip(_interfaces, sl)):
            if _interface is None or isinstance(_interface, Interface):
                f = _interface or erf_interface
                _interfaces[i] = [f] * len(_slabs)

        _interfaces = list(flatten(_interfaces))
        _interfaces = [erf_interface if i is None else i for i in _interfaces]

        # distance of each interface from the fronting interface
        dist = np.cumsum(total_slabs[:-1, 0])

        # work out how much space the SLD profile should encompass
        zstart = -5. - 8 * total_slabs[1, 3]
        zend = 5. + dist[-1] + 8 * total_slabs[-1, 3]
        nsteps = int((zend - zstart) / slice_size + 1)
        zed = np.linspace(zstart, zend, num=nsteps)

        # the output arrays
        sld = np.ones_like(zed, dtype=float) * total_slabs[0, 1]
        isld = np.ones_like(zed, dtype=float) * total_slabs[0, 2]

        # work out the step in SLD at an interface
        delta_rho = total_slabs[1:, 1] - total_slabs[:-1, 1]
        delta_irho = total_slabs[1:, 2] - total_slabs[:-1, 2]

        # the RMS roughness of each step
        sigma = total_slabs[1:, 3]
        step = Step()

        # accumulate the SLD of each step.
        for i in range(len(total_slabs) - 1):
            f = _interfaces[i + 1]
            if sigma[i] == 0:
                f = step

            p = f(zed, scale=sigma[i], loc=dist[i])
            sld += delta_rho[i] * p
            isld += delta_irho[i] * p

        sld[0] = total_slabs[0, 1]
        isld[0] = total_slabs[0, 2]
        sld[-1] = total_slabs[-1, 1]
        isld[-1] = total_slabs[-1, 2]

        micro_slabs = np.zeros((len(zed), 5), float)
        micro_slabs[:, 0] = zed[1] - zed[0]
        micro_slabs[:, 1] = sld
        micro_slabs[:, 2] = isld

        return micro_slabs
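
A hedged sketch of what routes a structure through this micro-slab code path: any component with a non-default interface. It assumes refnx.reflect exports SLD and the Linear interface profile, and that Component.interfaces is settable.

from refnx.reflect import SLD, Linear

air = SLD(0.0)
sio2 = SLD(3.47)
si = SLD(2.07)

layer = sio2(100, 5)
layer.interfaces = Linear()   # non-Gaussian roughness on one slab

s = air | layer | si(0, 3)
print(s.slabs().shape)        # many thin micro-slabs rather than 3 rows
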
Example #26
    def slabs(self, **kwds):
        r"""

        Returns
        -------
        slabs : :class:`np.ndarray`
            Slab representation of this structure.
            Has shape (N, 5).

            - slab[N, 0]
               thickness of layer N
            - slab[N, 1]
               *overall* SLD.real of layer N (material AND solvent)
            - slab[N, 2]
               *overall* SLD.imag of layer N (material AND solvent)
            - slab[N, 3]
               roughness between layer N and N-1
            - slab[N, 4]
               volume fraction of solvent in layer N.

        Notes
        -----
        If `Structure.reversed is True` then the slab representation order is
        reversed. The slab order is reversed before the solvation calculation
        is done. I.e. if `Structure.solvent == 'backing'` and
        `Structure.reversed is True` then the material that solvates the system
        is the component in `Structure[0]`, which corresponds to
        `Structure.slab[-1]`.

        """
        if not len(self):
            return None

        if not (isinstance(self.data[-1], Slab)
                and isinstance(self.data[0], Slab)):
            raise ValueError("The first and last Components in a Structure"
                             " need to be Slabs")

        # Each layer can be given a different type of roughness profile
        # that defines transition between successive layers.
        # The default interface is specified by None (= Gaussian roughness)
        interfaces = flatten(self.interfaces)
        if all([i is None for i in interfaces]):
            # if all the interfaces are Gaussian, then simply concatenate
            # the default slabs property of each component.
            sl = [c.slabs(structure=self) for c in self.components]

            try:
                slabs = np.concatenate(sl)
            except ValueError:
                # some of slabs may be None. np can't concatenate arr and None
                slabs = np.concatenate([s for s in sl if s is not None])
        else:
            # there is a non-default interfacial roughness, create a microslab
            # representation
            slabs = self._micro_slabs()

        # if the slab representation needs to be reversed.
        if self.reverse_structure:
            roughnesses = slabs[1:, 3]
            slabs = np.flipud(slabs)
            slabs[1:, 3] = roughnesses[::-1]
            slabs[0, 3] = 0.

        if np.any(slabs[:, 4] > 0):
            # overall SLD is a weighted average of the vfs and slds
            slabs[1:-1] = self.overall_sld(slabs[1:-1], self.solvent)

        if self.contract > 0:
            return _contract_by_area(slabs, self.contract)
        else:
            return slabs
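
Sketch of reading the documented columns from a simple all-Gaussian structure (same refnx.reflect assumptions as the previous sketch).

from refnx.reflect import SLD

s = SLD(0.0) | SLD(3.47)(100, 5) | SLD(2.07)(0, 3)
slabs = s.slabs()

thickness = slabs[:, 0]   # column 0: layer thickness
sld_real = slabs[:, 1]    # column 1: overall real SLD
sld_imag = slabs[:, 2]    # column 2: overall imaginary SLD
roughness = slabs[:, 3]   # column 3: roughness with the preceding layer
vfsolv = slabs[:, 4]      # column 4: solvent volume fraction
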
Example #27
def process_chain(objective, chain, nburn=0, nthin=1, flatchain=False):
    """
    Process the chain produced by a sampler for a given Objective

    Parameters
    ----------
    objective : refnx.analysis.Objective
        The Objective function that the Posterior was sampled for
    chain : array
        The MCMC chain
    nburn : int, optional
        discard this many steps from the start of the chain
    nthin : int, optional
        only accept every `nthin` samples from the chain
    flatchain : bool, optional
        collapse the walkers down into a single dimension.

    Returns
    -------
    [(param, stderr, chain)] : list
        List of (param, stderr, chain) tuples.
        If `isinstance(objective.parameters, Parameters)` then `param` is a
        `Parameter` instance. `param.value`, `param.stderr` and
        `param.chain` will contain the median, stderr and chain samples,
        respectively. Otherwise `param` will be a float representing the
        median of the chain samples.
        `stderr` is the half width of the [15.87, 84.13] spread (similar to
        standard deviation) and `chain` is an array containing the MCMC
        samples for that parameter.

    Notes
    -----
    The chain should have the shape `(iterations, nwalkers, nvary)` or
    `(iterations, ntemps, nwalkers, nvary)` if parallel tempering was
    employed.
    The burned and thinned chain is created via:
    `chain[nburn::nthin]`.
    Note, if parallel tempering is employed, then only the lowest temperature
    of the parallel tempering chain is processed and returned as it
    corresponds to the (lowest energy) target distribution.
    If `flatchain is True` then the burned/thinned chain is reshaped and
    `arr.reshape(-1, nvary)` is returned.
    This function has the effect of setting each parameter's stderr.
    """
    chain = chain[nburn::nthin]
    shape = chain.shape
    nvary = shape[-1]

    # nwalkers = shape[1]
    if len(shape) == 4:
        ntemps = shape[1]
    elif len(shape) == 3:
        ntemps = -1

    if ntemps != -1:
        # PTSampler, we require the target distribution in the first row.
        chain = chain[:, 0]

    _flatchain = chain.reshape((-1, nvary))
    if flatchain:
        chain = _flatchain

    flat_params = list(f_unique(flatten(objective.parameters)))
    varying_parameters = objective.varying_parameters()

    # set the stderr of each of the Parameters
    result_list = []
    if np.all([is_parameter(param) for param in flat_params]):
        # zero out all the old parameter stderrs
        for param in flat_params:
            param.stderr = None
            param.chain = None

        # do the error calcn for the varying parameters and set the chain
        quantiles = np.percentile(_flatchain, [15.87, 50, 84.13], axis=0)
        for i, param in enumerate(varying_parameters):
            std_l, median, std_u = quantiles[:, i]
            param.value = median
            param.stderr = 0.5 * (std_u - std_l)

            # copy in the chain
            param.chain = np.copy(chain[..., i])
            res = MCMCResult(
                name=param.name,
                param=param,
                median=param.value,
                stderr=param.stderr,
                chain=param.chain,
            )
            result_list.append(res)

        fitted_values = np.array(varying_parameters)

        # give each constrained param a chain (to be reshaped later)
        constrained_params = [
            param for param in flat_params if param.constraint is not None
        ]

        for constrain_param in constrained_params:
            constrain_param.chain = np.empty(chain.shape[:-1], float)

        # now iterate through the varying parameters, set the values, thereby
        # setting the constraint value
        if len(constrained_params):
            for index in np.ndindex(chain.shape[:-1]):
                # iterate over parameter vectors
                pvals = chain[index]
                objective.setp(pvals)

                for constrain_param in constrained_params:
                    constrain_param.chain[index] = constrain_param.value

            for constrain_param in constrained_params:
                quantiles = np.percentile(constrain_param.chain,
                                          [15.87, 50, 84.13])

                std_l, median, std_u = quantiles
                constrain_param.value = median
                constrain_param.stderr = 0.5 * (std_u - std_l)

        # now reset the fitted parameter values (they would've been changed
        # by the constraint calculations)
        objective.setp(fitted_values)

    # the parameter set are not Parameter objects, an array was probably
    # being used with BaseObjective.
    else:
        for i in range(nvary):
            c = np.copy(chain[..., i])
            median, stderr = uncertainty_from_chain(c)
            res = MCMCResult(name="",
                             param=median,
                             median=median,
                             stderr=stderr,
                             chain=c)
            result_list.append(res)

    return result_list
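
A standalone sketch of just the burn/thin indexing described in the Notes; the shapes are illustrative only.

import numpy as np

iterations, nwalkers, nvary = 1000, 100, 3
chain = np.zeros((iterations, nwalkers, nvary))

burned_thinned = chain[200::10]    # nburn=200, nthin=10
print(burned_thinned.shape)        # (80, 100, 3)

flat = burned_thinned.reshape(-1, nvary)
print(flat.shape)                  # (8000, 3), as for flatchain=True
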
Example #28
    def fit(self, method="L-BFGS-B", target="nll", verbose=True, **kws):
        """
        Obtain the maximum log-likelihood, or log-posterior, estimate (mode)
        of the objective. Maximising the log-likelihood is equivalent to
        minimising chi2 in a least squares fit.

        Parameters
        ----------
        method : str
            which method to use for the optimisation. One of:

            - `'least_squares'`: :func:`scipy.optimize.least_squares`.
            - `'L-BFGS-B'`: L-BFGS-B.
            - `'differential_evolution'`:
              :func:`scipy.optimize.differential_evolution`
            - `'dual_annealing'`:
              :func:`scipy.optimize.dual_annealing` (SciPy >= 1.2.0)
            - `'shgo'`: :func:`scipy.optimize.shgo` (SciPy >= 1.2.0)

            You can also choose many of the minimizers from
            :func:`scipy.optimize.minimize`.

        target : {'nll', 'nlpost'}, optional
            Minimize the negative log-likelihood (`'nll'`) or the negative
            log-posterior (`'nlpost'`). This is equivalent to maximising the
            likelihood or posterior probabilities respectively.
            Maximising the likelihood is equivalent to minimising chi^2 in a
            least-squares fit.
            This option only applies to the `differential_evolution`, `shgo`,
            `dual_annealing` or `L-BFGS-B` methods.
            These optimisers require lower and upper (box) bounds for each
            parameter. If the `Bounds` on a parameter are not an `Interval`,
            but a `PDF` specifying a statistical distribution, then the lower
            and upper bounds are approximated as
            ``PDF.rv.ppf([0.005, 0.995])``, covering 99 % of the statistical
            distribution.
        verbose : bool, optional
            Gives fitting progress. To see a progress bar, tqdm has to be
            installed.
        kws : dict
            Additional arguments are passed to the underlying minimization
            method.

        Returns
        -------
        result, covar : :class:`scipy.optimize.OptimizeResult`, np.ndarray
            `result.x` contains the best fit parameters
            `result.covar` is the covariance matrix for the fit.
            `result.stderr` is the uncertainties on each of the fit parameters.

        Notes
        -----
        If the `objective` supplies a `residuals` method then `least_squares`
        can be used. Otherwise the `nll` method of the `objective` is
        minimised. Use this method just before a sampling run.
        If `self.objective.parameters` is a `Parameters` instance, then each
        of the varying parameters has its value updated by the fit, and each
        `Parameter` has a `stderr` attribute which represents the uncertainty
        on the fit parameter.

        The use of `dual_annealing` and `shgo` requires that `scipy >= 1.2.0`
        be installed.

        """
        _varying_parameters = self.objective.varying_parameters()
        init_pars = np.array(_varying_parameters)

        _min_kws = {}
        _min_kws.update(kws)
        _bounds = bounds_list(self.objective.varying_parameters())
        _min_kws["bounds"] = _bounds

        # setup callback default
        _min_kws.setdefault("callback", None)

        cost = self.objective.nll
        if target == "nlpost":
            cost = self.objective.nlpost

        # a decorator for the progress bar updater
        def _callback_wrapper(callback_func, pbar):
            def callback(*args, **kwds):
                pbar.update(1)
                if callback_func is None:
                    return None
                else:
                    return callback_func(*args, **kwds)

            return callback

        # least_squares Trust Region Reflective by default
        if method == "least_squares":
            b = np.array(_bounds)
            _min_kws["bounds"] = (b[..., 0], b[..., 1])

            # least_squares doesn't have a callback
            _min_kws.pop("callback", None)

            res = least_squares(self.objective.residuals, init_pars,
                                **_min_kws)
        # differential_evolution, dual_annealing, shgo require lower and upper
        # bounds
        elif method in ["differential_evolution", "dual_annealing", "shgo"]:
            mini = getattr(sciopt, method)
            with get_progress_bar(verbose, None) as pbar:
                _min_kws["callback"] = _callback_wrapper(
                    _min_kws["callback"], pbar)

                res = mini(cost, **_min_kws)
        else:
            # otherwise stick it to minimizer. Default being L-BFGS-B
            _min_kws["method"] = method
            _min_kws["bounds"] = _bounds

            with get_progress_bar(verbose, None) as pbar:
                _min_kws["callback"] = _callback_wrapper(
                    _min_kws["callback"], pbar)

                res = minimize(cost, init_pars, **_min_kws)

        # OptimizeResult.success may not be present (dual annealing)
        if hasattr(res, "success") and res.success:
            self.objective.setp(res.x)

            # Covariance matrix estimation
            covar = self.objective.covar()
            errors = np.sqrt(np.diag(covar))
            res["covar"] = covar
            res["stderr"] = errors

            # check if the parameters are all Parameter instances.
            flat_params = list(f_unique(flatten(self.objective.parameters)))
            if np.all([is_parameter(param) for param in flat_params]):
                # zero out all the old parameter stderrs
                for param in flat_params:
                    param.stderr = None
                    param.chain = None

                for i, param in enumerate(_varying_parameters):
                    param.stderr = errors[i]

            # need to touch up the output to make sure we leave the
            # parameters as we found them
            self.objective.setp(res.x)

        return res
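
Sketch of the bound approximation described in the docstring, assuming refnx.analysis exports PDF and that Parameter.bounds accepts it.

import scipy.stats as stats
from refnx.analysis import Parameter, PDF

p = Parameter(1.0, name="p", vary=True)
p.bounds = PDF(stats.norm(loc=1.0, scale=0.5))

# the box bounds handed to the optimiser cover 99% of the prior
lb, ub = p.bounds.rv.ppf([0.005, 0.995])
print(lb, ub)    # roughly 1 - 1.29 and 1 + 1.29
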
Example #29
    def sample(
        self,
        steps,
        nthin=1,
        random_state=None,
        f=None,
        callback=None,
        verbose=True,
        pool=-1,
    ):
        """
        Performs sampling from the objective.

        Parameters
        ----------
        steps : int
            Collect `steps` samples into the chain. The sampler will run a
            total of `steps * nthin` moves.
        nthin : int, optional
            Each chain sample is separated by `nthin` iterations.
        random_state : {int, `np.random.RandomState`, `np.random.Generator`}
            If `random_state` is not specified the `~np.random.RandomState`
            singleton is used.
            If `random_state` is an int, a new ``RandomState`` instance is
            used, seeded with random_state.
            If `random_state` is already a ``RandomState`` or a ``Generator``
            instance, then that object is used.
            Specify `random_state` for repeatable minimizations.
        f : file-like or str
            File to incrementally save chain progress to. Each row in the file
            is a flattened array of size `(nwalkers, ndim)` or
            `(ntemps, nwalkers, ndim)`. There are `steps` rows in the
            file.
        callback : callable
            callback function to be called at each iteration step. Has the
            signature `callback(coords, logprob)`.
        verbose : bool, optional
            Gives updates on the sampling progress
        pool : int or map-like object, optional
            If `pool` is an `int` then it specifies the number of threads to
            use for parallelization. If `pool == -1`, then all CPUs are used.
            If pool is a map-like callable that follows the same calling
            sequence as the built-in map function, then this pool is used for
            parallelisation.

        Notes
        -----
        Please see :class:`emcee.EnsembleSampler` for its detailed behaviour.

        >>> # we'll burn the first 500 steps
        >>> fitter.sample(500)
        >>> # after you've run those, then discard them by resetting the
        >>> # sampler.
        >>> fitter.sampler.reset()
        >>> # Now collect 40 steps, each step separated by 50 sampler
        >>> # generations.
        >>> fitter.sample(40, nthin=50)

        One can also burn and thin afterwards via `process_chain`
        (see Example #27).
        """
        self._check_vars_unchanged()

        # setup a random number generator
        rng = check_random_state(random_state)

        if self._state is None:
            self.initialise(random_state=rng)

        # for saving progress to file
        def _callback_wrapper(state, h=None):
            if callback is not None:
                callback(state.coords, state.log_prob)

            if h is not None:
                h.write(" ".join(map(str, state.coords.ravel())))
                h.write("\n")

        # remove chains from each of the parameters because they slow down
        # pickling but only if they are parameter objects.
        flat_params = f_unique(flatten(self.objective.parameters))
        flat_params = [param for param in flat_params if is_parameter(param)]
        # zero out all the old parameter stderrs
        for param in flat_params:
            param.stderr = None
            param.chain = None

        # make sure the checkpoint file exists
        if f is not None:
            with possibly_open_file(f, "w") as h:
                # write the shape of each step of the chain
                h.write("# ")
                shape = self._state.coords.shape
                h.write(", ".join(map(str, shape)))
                h.write("\n")

        # set the random state of the sampler
        # normally one could give this as an argument to the sample method
        # but PTSampler didn't historically accept that...
        if isinstance(rng, np.random.RandomState):
            rstate0 = rng.get_state()
            self._state.random_state = rstate0
            self.sampler.random_state = rstate0

        # using context manager means we kill off zombie pool objects
        # but does mean that the pool has to be specified each time.
        with MapWrapper(pool) as g, possibly_open_file(f, "a") as h:
            # these kwargs are provided to the sampler.sample method
            kwargs = {"iterations": steps, "thin": nthin}

            # if you're not creating more than 1 thread, then don't bother with
            # a pool.
            if isinstance(self.sampler, emcee.EnsembleSampler):
                if pool == 1:
                    self.sampler.pool = None
                else:
                    self.sampler.pool = g
            else:
                kwargs["mapper"] = g

            # new emcee arguments
            sampler_args = getargspec(self.sampler.sample).args
            if "progress" in sampler_args and verbose:
                kwargs["progress"] = True
                verbose = False

            if "thin_by" in sampler_args:
                kwargs["thin_by"] = nthin
                kwargs.pop("thin", 0)

            # perform the sampling
            for state in self.sampler.sample(self._state, **kwargs):
                self._state = state
                _callback_wrapper(state, h=h)

        if isinstance(self.sampler, emcee.EnsembleSampler):
            self.sampler.pool = None

        # sets parameter value and stderr
        return process_chain(self.objective, self.chain)
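
An end-to-end sketch tying fit, sample and process_chain together. Everything below is an assumption-flagged outline of the generic refnx workflow (Model/Objective/Data1D), not code taken from the example above.

import numpy as np
from refnx.dataset import Data1D
from refnx.analysis import (CurveFitter, Model, Objective, Parameter,
                            Parameters, process_chain)

def line(x, params, *args, **kwds):
    # Parameters has an __array__ method (see Example #20), so np.array
    # yields the current numeric values
    gradient, intercept = np.array(params)
    return gradient * x + intercept

x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)
data = Data1D(data=(x, y, np.full_like(x, 0.1)))

gradient = Parameter(1.0, name="gradient", bounds=(0, 5), vary=True)
intercept = Parameter(0.0, name="intercept", bounds=(-5, 5), vary=True)
model = Model(Parameters([gradient, intercept]), fitfunc=line)
objective = Objective(model, data)

fitter = CurveFitter(objective)
fitter.fit("differential_evolution")    # mode estimate first
fitter.sample(100)                      # burn-in
fitter.sampler.reset()                  # discard, as the docstring suggests
fitter.sample(200, nthin=5)             # production samples
process_chain(objective, fitter.chain)  # sets each parameter's value/stderr
print(gradient.value, gradient.stderr)
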
Example #30
 def nDim(self):
     if self.nDimensions is None:
         self.nDimensions = len(
             [p for p in f_unique(flatten(self.objective.parameters))
              if p.vary])
     return self.nDimensions