Example #1
# Assumed imports, matching scipy's internal layout when this snippet was
# written (scipy < 1.8); `mp` is the old private `minpack` module.
import warnings

import numpy as np
from scipy.linalg import svd
from scipy.optimize import least_squares, OptimizeWarning
from scipy.optimize import minpack as mp
from scipy.optimize._lsq.least_squares import prepare_bounds


def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = mp._initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)

    weights = 1.0 / np.asarray(sigma) if sigma is not None else None
    func = mp._wrap_func(f, xdata, ydata, weights)
    if callable(jac):
        jac = mp._wrap_jac(jac, xdata, weights)
    elif jac is None and method != 'lm':
        jac = '2-point'

    # Remove full_output from kwargs, otherwise we're passing it in twice.
    return_full = kwargs.pop('full_output', False)
    if method == 'lm':
        res = mp.leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
    else:
        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        # `least_squares` has no MINPACK-style infodict; build a minimal
        # stand-in so `full_output=True` callers still get something useful.
        infodict = dict(nfev=res.nfev, fvec=res.fun, fjac=res.jac, ipvt=None,
                        qtf=None)
        ier = res.status
        errmsg = res.message
    if ier not in [1, 2, 3, 4]:
        raise RuntimeError("Optimal parameters not found: " + errmsg)

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
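
The covariance step in the `least_squares` branch is worth isolating: `pcov` comes from the SVD of the final Jacobian, with singular values below a numerical-rank threshold discarded before inverting. A minimal, NumPy-only sketch of that computation, using a made-up Jacobian:

import numpy as np

# Stand-in Jacobian at the solution (m residuals x n parameters).
J = np.array([[1.0, 0.5],
              [0.3, 2.0],
              [0.7, 0.1]])

# Moore-Penrose pseudoinverse of J^T J via SVD: J = U S V^T gives
# pinv(J^T J) = V S^-2 V^T once near-zero singular values are dropped.
_, s, VT = np.linalg.svd(J, full_matrices=False)
threshold = np.finfo(float).eps * max(J.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
print(pcov)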
Example #2
# Assumed imports; scipy's private `minpack` module was removed in
# scipy >= 1.8, so treat these as historical. `_general_function` is a
# project-local residual helper (it accepts the extra `strg` tag) and is
# not shown in this snippet.
import numpy as np
from scipy.optimize import leastsq, minpack


def curve_fit_2(f, strg, xdata, ydata, p0=None, sigma=None,
                absolute_sigma=False, check_finite=True,
                bounds=(-np.inf, np.inf), method=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``

    Parameters
    ----------
    f : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array
        for functions with k predictors.
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters.  If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence, optional
        If not None, the uncertainties in the ydata array. These are used as
        weights in the least-squares problem
        i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
        If None, the uncertainties are assumed to be 1.
    absolute_sigma : bool, optional
        If False, `sigma` denotes relative weights of the data points.
        The returned covariance matrix `pcov` is based on *estimated*
        errors in the data, and is not affected by the overall
        magnitude of the values in `sigma`. Only the relative
        magnitudes of the `sigma` values matter.

        If True, `sigma` describes one standard deviation errors of
        the input data points. The estimated covariance in `pcov` is
        based on these values.
    check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.) Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization.  See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared error
        of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
        and 'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.

    Raises
    ------
    OptimizeWarning
        if covariance of the parameters cannot be estimated.

    ValueError
        if either `ydata` or `xdata` contain NaNs.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    stats.linregress : Calculate a linear least squares regression for two sets
                       of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> ydata = y + 0.2 * np.random.normal(size=len(xdata))

    >>> popt, pcov = curve_fit(func, xdata, ydata)

    Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``
    and ``0 < c < 1``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))

    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = minpack.prepare_bounds(bounds, n)
    if p0 is None:
        p0 = minpack._initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)

    args = (xdata, ydata, f, strg)
    if sigma is None:
        func = _general_function
    else:
        func = minpack._weighted_general_function
        args += (1.0 / np.asarray(sigma),)

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, args=args, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        res = minpack.least_squares(func, p0, args=args, bounds=bounds,
                                    method=method, **kwargs)
        # Stock `least_squares` never returns False; this guard only fires
        # if a patched version signals hard failure that way.
        if res is False:
            return [0], [0], res, 0
        if not res.success:
            return res.x, [0], True, res.nfev
            # raise RuntimeError("Optimal parameters not found: " + res.message)

        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = minpack.svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)
            warn_cov = True

    if warn_cov:
        minpack.warnings.warn('Covariance of the parameters could not be estimated',
                              category=minpack.OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    elif method == 'lm':
        # `res` is the raw `leastsq` tuple here, so synthesize the success
        # flag and evaluation count that the 'trf'/'dogbox' branch returns.
        return popt, pcov, ier in [1, 2, 3, 4], infodict['nfev']
    else:
        return popt, pcov, res.success, res.nfev
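
Since this variant returns a four-tuple instead of `curve_fit`'s usual `(popt, pcov)`, callers must unpack accordingly. A hedged usage sketch, assuming `curve_fit_2` and its `_general_function` helper are in scope; the "run-1" tag is a hypothetical stand-in for whatever `strg` the helper expects:

import numpy as np

def model(x, a, b):
    return a * np.exp(-b * x)

xdata = np.linspace(0, 4, 50)
ydata = model(xdata, 2.5, 1.3) + 0.05 * np.random.normal(size=xdata.size)

# `strg` is forwarded untouched to the residual helper, so any
# identifying string should do here.
popt, pcov, success, nfev = curve_fit_2(model, "run-1", xdata, ydata,
                                        p0=[1.0, 1.0], method='trf')
if success:
    perr = np.sqrt(np.diag(pcov))  # one-sigma parameter errors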
Example #3
    def _run_least_squares(self, **kwargs):
        p0 = kwargs.pop("p0", self.p0)
        bounds = kwargs.pop("bounds", (-np.inf, np.inf))
        method = kwargs.pop("method", "trf")
        loss = kwargs.pop("loss", "huber")
        max_nfev = kwargs.pop("max_nfev", 10000)
        f_scale = kwargs.pop("f_scale", 0.1)
        jac = kwargs.pop("jac", "2-point")

        #         loss_fcn = _loss_fcns.pop(loss, loss)

        # Copied from `curve_fit` line 704 (20200527)
        if p0 is None:
            # determine number of parameters by inspecting the function
            from scipy._lib._util import getargspec_no_self as _getargspec

            args, varargs, varkw, defaults = _getargspec(self.function)
            if len(args) < 2:
                raise ValueError(
                    "Unable to determine number of fit parameters.")
            n = len(args) - 1
        else:
            p0 = np.atleast_1d(p0)
            n = p0.size

        if isinstance(bounds, dict):
            # Monkey patch to work with bounds being stored as
            # dict for TeX_info. (20201202)
            bounds = [bounds[k] for k in self.argnames]
            bounds = np.array(bounds).T

        # Copied from `curve_fit` line 715 (20200527)
        lb, ub = prepare_bounds(bounds, n)
        if p0 is None:
            p0 = _initialize_feasible(lb, ub)

        if "args" in kwargs:
            raise ValueError(
                "Adopted `curve_fit` convention for which'args' is not a supported keyword argument."
            )

        xdata = self.observations.used.x
        ydata = self.observations.used.y
        sigma = self.observations.used.w

        # Copied from `curve_fit` line 749 (20200527)
        # Determine type of sigma
        if sigma is not None:
            sigma = np.asarray(sigma)
            # sigma = sigma / np.nansum(sigma)

            # if 1-d, sigma are errors, define transform = 1/sigma
            if sigma.shape == (ydata.size, ):
                transform = 1.0 / sigma
            # if 2-d, sigma is the covariance matrix,
            # define transform = L such that L L^T = C
            elif sigma.shape == (ydata.size, ydata.size):
                try:
                    # scipy.linalg.cholesky requires lower=True to return L L^T = A
                    transform = cholesky(sigma, lower=True)
                except LinAlgError:
                    raise ValueError("`sigma` must be positive definite.")
            else:
                raise ValueError("`sigma` has incorrect shape.")
        else:
            transform = None

        # Copied from `curve_fit` line 769 (20200527)
        loss_func = _wrap_func(self.function, xdata, ydata, transform)
        # Already define default `jac` with `kwargs`. Don't need ELSE clause.
        if callable(jac):
            jac = _wrap_jac(jac, xdata, transform)

        res = least_squares(
            loss_func,
            p0,
            jac=jac,
            bounds=bounds,
            method=method,
            loss=loss,
            max_nfev=max_nfev,
            f_scale=f_scale,
            **kwargs,
        )

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        fit_bounds = np.concatenate([lb, ub]).reshape((2, -1)).T
        fit_bounds = {
            k: FitBounds(*b)
            for k, b in zip(self.argnames, fit_bounds)
        }
        fit_bounds = tuple(fit_bounds.items())
        self._fit_bounds = fit_bounds

        #         self._loss_fcn = loss_fcn
        return res, p0
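
The defaults chosen above (`loss="huber"`, `f_scale=0.1`) make the fit robust: residuals beyond roughly `f_scale` contribute linearly rather than quadratically, so outliers stop dominating. A standalone sketch of the same idea directly with `scipy.optimize.least_squares`, on synthetic data with injected outliers:

import numpy as np
from scipy.optimize import least_squares

def residuals(p, x, y):
    a, b = p
    return a * np.exp(-b * x) - y

x = np.linspace(0, 4, 50)
y = 2.5 * np.exp(-1.3 * x)
y[::10] += 2.0  # inject a few gross outliers

# Huber loss: quadratic for small residuals, linear beyond ~f_scale,
# which down-weights the outliers instead of squaring them.
res = least_squares(residuals, x0=[1.0, 1.0], args=(x, y),
                    method='trf', loss='huber', f_scale=0.1)
print(res.x, res.success, res.nfev)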
Example #4
# Assumed imports, mirroring scipy/optimize/minpack.py at the commit linked
# below; these private helpers moved or disappeared in scipy >= 1.8.
import warnings
import numpy as np
from scipy.linalg import cholesky, svd, LinAlgError
from scipy.optimize import leastsq, least_squares, OptimizeWarning
from scipy.optimize.minpack import _wrap_func, _wrap_jac, _initialize_feasible
from scipy.optimize._lsq.least_squares import prepare_bounds


def curve_fit_m(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
                check_finite=True, bounds=(-np.inf, np.inf), method=None,
                jac=None, **kwargs):
	"""
	The documentation and source of `scipy.optimize.curve_fit` can be found at
	https://github.com/scipy/scipy/blob/adc4f4f7bab120ccfab9383aba272954a0a12fb0/scipy/optimize/minpack.py#L511-L813
	"""
	if p0 is None:
		# determine the number of parameters by inspecting the function
		from ._helpers import funcArgsNr
		n = funcArgsNr(f) - 1  # exclude the independent variable
		if n < 1:
			raise ValueError("Unable to determine number of fit parameters.")
	else:
		p0 = np.atleast_1d(p0)
		n = p0.size

	lb, ub = prepare_bounds(bounds, n)
	if p0 is None:
		p0 = _initialize_feasible(lb, ub)
	
	bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
	if method is None:
		if bounded_problem:
			method = 'trf'
		else:
			method = 'lm'

	if method == 'lm' and bounded_problem:
		raise ValueError("Method 'lm' only works for unconstrained problems. "
		                 "Use 'trf' or 'dogbox' instead.")

	# optimization may produce garbage for float32 inputs, cast them to float64

	# NaNs cannot be handled
	if check_finite:
		ydata = np.asarray_chkfinite(ydata, float)
	else:
		ydata = np.asarray(ydata, float)

	if isinstance(xdata, (list, tuple, np.ndarray)):
		# `xdata` is passed straight to the user-defined `f`, so allow
		# non-array_like `xdata`.
		if check_finite:
			xdata = np.asarray_chkfinite(xdata, float)
		else:
			xdata = np.asarray(xdata, float)

	if ydata.size == 0:
		raise ValueError("`ydata` must not be empty!")

	# Determine type of sigma
	if sigma is not None:
		sigma = np.asarray(sigma)

		# if 1-D, sigma are errors; define transform = 1/sigma
		if sigma.shape == (ydata.size,):
			transform = 1.0 / sigma
		# if 2-D, sigma is the covariance matrix;
		# define transform = L such that L L^T = C
		elif sigma.shape == (ydata.size, ydata.size):
			try:
				# scipy.linalg.cholesky requires lower=True to return L L^T = A
				transform = cholesky(sigma, lower=True)
			except LinAlgError:
				raise ValueError("`sigma` must be positive definite.")
		else:
			raise ValueError("`sigma` has incorrect shape.")
	else:
		transform = None

	func = _wrap_func(f, xdata, ydata, transform)
	if callable(jac):
		jac = _wrap_jac(jac, xdata, transform)
	elif jac is None and method != 'lm':
		jac = '2-point'

	if 'args' in kwargs:
		# The specification for the model function `f` does not support
		# additional arguments. Refer to the `curve_fit` docstring for
		# acceptable call signatures of `f`.
		raise ValueError("'args' is not a supported keyword argument.")

	if method == 'lm':
		# Remove full_output from kwargs, otherwise we're passing it in twice.
		return_full = kwargs.pop('full_output', False)
		res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
		popt, pcov, infodict, errmsg, ier = res
		ysize = len(infodict['fvec'])
		cost = np.sum(infodict['fvec'] ** 2)
		if ier not in [1, 2, 3, 4]:
			raise RuntimeError("Optimal parameters not found: " + errmsg)
	else:
		# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
		if 'max_nfev' not in kwargs:
			kwargs['max_nfev'] = kwargs.pop('maxfev', None)

		res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs)

		if not res.success:
			raise RuntimeError("Optimal parameters not found: " + res.message)

		ysize = len(res.fun)
		cost = 2 * res.cost  # res.cost is half sum of squares!
		popt = res.x

		# Do Moore-Penrose inverse discarding zero singular values.
		_, s, VT = svd(res.jac, full_matrices=False)
		threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
		s = s[s > threshold]
		VT = VT[:s.size]
		pcov = np.dot(VT.T / s**2, VT)
		return_full = False

	warn_cov = False
	if pcov is None:
		# indeterminate covariance
		pcov = np.zeros((len(popt), len(popt)), dtype=float)
		pcov.fill(np.inf)
		warn_cov = True
	elif not absolute_sigma:
		if ysize > p0.size:
			s_sq = cost / (ysize - p0.size)
			pcov = pcov * s_sq
		else:
			pcov.fill(np.inf)
			warn_cov = True

	if warn_cov:
		warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning)
	
	# add `cost` to the returned values
	if return_full:
		return popt, pcov, cost, infodict, errmsg, ier
	else:
		return popt, pcov, cost
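
The extra `cost` return makes a quick goodness-of-fit check possible: with `absolute_sigma=True` and genuine 1-sigma errors, `cost / (ysize - p0.size)` is the reduced chi-square, which should land near 1 for a good fit. A hedged usage sketch, assuming `curve_fit_m` above is importable:

import numpy as np

def model(x, a, b, c):
    return a * np.exp(-b * x) + c

xdata = np.linspace(0, 4, 50)
sigma = np.full(xdata.size, 0.2)  # known one-sigma errors
ydata = model(xdata, 2.5, 1.3, 0.5) + sigma * np.random.normal(size=xdata.size)

popt, pcov, cost = curve_fit_m(model, xdata, ydata,
                               sigma=sigma, absolute_sigma=True)
red_chi2 = cost / (ydata.size - popt.size)  # reduced chi-square, ~1 if good
print(popt, red_chi2)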