Example #1
import copy

import numpy as N

from mpfit import mpfit  # assumes the pure-Python MPFIT port (mpfit.py) is importable


def test_linfit():
    x = N.array([-1.7237128E+00, 1.8712276E+00, -9.6608055E-01,
                 -2.8394297E-01, 1.3416969E+00, 1.3757038E+00,
                 -1.3703436E+00, 4.2581975E-02, -1.4970151E-01,
                 8.2065094E-01])
    y = N.array([1.9000429E-01, 6.5807428E+00, 1.4582725E+00,
                 2.7270851E+00, 5.5969253E+00, 5.6249280E+00,
                 0.787615, 3.2599759E+00, 2.9771762E+00,
                 4.5936475E+00])
    ey = 0.07 * N.ones(y.shape, dtype='float64')
    p0 = N.array([1.0, 1.0], dtype='float64')  # initial conditions
    pactual = N.array([3.2, 1.78])  # actual values used to make data
    parbase = {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.]}
    parinfo = []
    for i in range(len(pactual)):
        parinfo.append(copy.deepcopy(parbase))
    for i in range(len(pactual)):
        parinfo[i]['value'] = p0[i]
    fa = {'x': x, 'y': y, 'err': ey}
    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    if m.status <= 0:
        print('error message =', m.errmsg)
    assert N.allclose(
        m.params, N.array([3.20996572, -1.7709542], dtype='float64'))
    assert N.allclose(
        m.perror, N.array([0.02221018, 0.01893756], dtype='float64'))
    chisq = (myfunctlin(m.params, x=x, y=y, err=ey)[1] ** 2).sum()

    assert N.allclose(N.array([chisq], dtype='float64'),
                      N.array([2.756284983], dtype='float64'))
    assert m.dof == 8
    return
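The test relies on a residual function myfunctlin defined elsewhere in the test module. A minimal sketch is given below, following mpfit's [status, residuals] return convention, where a non-negative status tells mpfit to continue iterating; the linear model p[0] - p[1]*x is an assumption inferred from the asserted best-fit values.

# Hypothetical helper, not part of the original snippet.  mpfit calls it
# with the current parameter vector and the keyword arguments from functkw.
def myfunctlin(p, fjac=None, x=None, y=None, err=None):
    model = p[0] - p[1] * x  # sign of the slope term inferred from the asserts
    status = 0  # non-negative: keep iterating
    return [status, (y - model) / err]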
Example #2
def test_rosenbrock():
    p0 = N.array([-1, 1.], dtype='float64')  # initial conditions
    pactual = N.array([1., 1.])  # actual minimum of the rosenbrock function
    m = mpfit(myfunctrosenbrock, p0)
    if m.status <= 0:
        print('error message =', m.errmsg)
    assert m.status > 0
    assert N.allclose(m.params, pactual)
    assert N.allclose(m.fnorm, 0)
    return
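This test likewise assumes a helper myfunctrosenbrock. A minimal sketch: writing the Rosenbrock function (1 - x)^2 + 100*(y - x^2)^2 as two residuals lets mpfit treat it as a least-squares problem, so m.fnorm (the sum of squared residuals) reaches 0 at the minimum (1, 1).

# Hypothetical helper, not part of the original snippet.
def myfunctrosenbrock(p, fjac=None):
    status = 0  # non-negative: keep iterating
    residuals = N.array([1. - p[0], 10. * (p[1] - p[0] ** 2)])
    return [status, residuals]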
Example #3
File: model.py  Project: AEljarrat/hyperspy
    def fit(self, fitter=None, method='ls', grad=False, weights=None,
            bounded=False, ext_bounding=False, update_plot=False, 
            **kwargs):
        """Fits the model to the experimental data
        
        Parameters
        ----------
        fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
            The optimizer to perform the fitting. If None the fitter
            defined in the Preferences is used. leastsq is the most 
            stable but it does not support bounding. mpfit supports
            bounding. fmin is the only one that supports 
            maximum likelihood estimation, but it is less robust than 
            the Levenberg–Marquardt based leastsq and mpfit, and it is 
            better to use it after one of them to refine the estimation.
        method : {'ls', 'ml'}
            Choose 'ls' (default) for least squares and 'ml' for 
            maximum-likelihood estimation. The latter only works with 
            fitter = 'fmin'.
        grad : bool
            If True, the analytical gradient is used if defined to 
            speed up the estimation. 
        weights : {None, True, numpy.array}
            If None, performs standard least squares. If True 
            performs weighted least squares where the weights are 
            calculated using spectrum.Spectrum.estimate_variance. 
            Alternatively, external weights can be supplied by passing
            a weights array of the same dimensions as the signal.
        ext_bounding : bool
            If True, enforce bounding by keeping the value of the 
            parameters constant out of the defined bounding area.
        bounded : bool
            If True performs bounded optimization if the fitter 
            supports it. Currently only mpfit support bounding. 
        update_plot : bool
            If True, the plot is updated during the optimization 
            process. It slows down the optimization but it permits
            to visualize the optimization progress. 
        
        **kwargs : key word arguments
            Any extra key word argument will be passed to the chosen
            fitter
            
        See Also
        --------
        multifit
            
        """
        if fitter is None:
            fitter = preferences.Model.default_fitter
        switch_aap = (update_plot != self._get_auto_update_plot())
        if switch_aap is True and update_plot is False:
            self._disconnect_parameters2update_plot()
            
        self.p_std = None
        self._set_p0()
        if ext_bounding:
            self._enable_ext_bounding()
        if grad is False:
            approx_grad = True
            jacobian = None
            odr_jacobian = None
            grad_ml = None
            grad_ls = None
        else:
            approx_grad = False
            jacobian = self._jacobian
            odr_jacobian = self._jacobian4odr
            grad_ml = self._gradient_ml
            grad_ls = self._gradient_ls
        if method == 'ml':
            weights = None
        if weights is True:
            if self.spectrum.variance is None:
                self.spectrum.estimate_variance()
            weights = 1. / np.sqrt(
                self.spectrum.variance[self.axes_manager._getitem_tuple][
                    self.channel_switches])
        elif weights is not None:
            weights = weights[self.axes_manager._getitem_tuple][
                self.channel_switches]
        args = (self.spectrum()[self.channel_switches], weights)
        
        # Least squares "dedicated" fitters
        if fitter == "leastsq":
            output = \
            leastsq(self._errfunc, self.p0[:], Dfun = jacobian,
            col_deriv=1, args = args, full_output = True, **kwargs)
            
            self.p0 = output[0]
            var_matrix = output[1]
            # In Scipy 0.7 sometimes the variance matrix is None (maybe a 
            # bug?) so...
            if var_matrix is not None:
                self.p_std = np.sqrt(np.diag(var_matrix))
            self.fit_output = output
        
        elif fitter == "odr":
            modelo = odr.Model(fcn = self._function4odr, 
            fjacb = odr_jacobian)
            mydata = odr.RealData(self.axis.axis[self.channel_switches],
            self.spectrum()[self.channel_switches],
            sx = None,
            sy = (1/weights if weights is not None else None))
            myodr = odr.ODR(mydata, modelo, beta0=self.p0[:])
            myoutput = myodr.run()
            result = myoutput.beta
            self.p_std = myoutput.sd_beta
            self.p0 = result
            self.fit_output = myoutput
            
        elif fitter == 'mpfit':
            autoderivative = 1
            if grad is True:
                autoderivative = 0

            if bounded is True:
                self.set_mpfit_parameters_info()
            elif bounded is False:
                self.mpfit_parinfo = None
            m = mpfit(self._errfunc4mpfit, self.p0[:],
                      parinfo=self.mpfit_parinfo,
                      functkw={'y': self.spectrum()[self.channel_switches],
                               'weights': weights},
                      autoderivative=autoderivative, quiet=1)
            self.p0 = m.params
            self.p_std = m.perror
            self.fit_output = m
            
        else:
            # General optimizers (including constrained ones: tnc, l_bfgs_b)
            # Least squares or maximum likelihood
            if method == 'ml':
                tominimize = self._poisson_likelihood_function
                fprime = grad_ml
            elif method == 'ls':
                tominimize = self._errfunc2
                fprime = grad_ls
                        
            # OPTIMIZERS
            
            # Simple (don't use gradient)
            if fitter == "fmin" :
                self.p0 = fmin(
                    tominimize, self.p0, args = args, **kwargs)
            elif fitter == "powell" :
                self.p0 = fmin_powell(tominimize, self.p0, args = args, 
                **kwargs)
            
            # Make use of the gradient
            elif fitter == "cg" :
                self.p0 = fmin_cg(tominimize, self.p0, fprime = fprime,
                args= args, **kwargs)
            elif fitter == "ncg" :
                self.p0 = fmin_ncg(tominimize, self.p0, fprime = fprime,
                args = args, **kwargs)
            elif fitter == "bfgs" :
                self.p0 = fmin_bfgs(
                    tominimize, self.p0, fprime = fprime,
                    args = args, **kwargs)
            
            # Constrained optimizers
            
            # Use gradient
            elif fitter == "tnc":
                if bounded is True:
                    self.set_boundaries()
                elif bounded is False:
                    self.self.free_parameters_boundaries = None
                self.p0 = fmin_tnc(tominimize, self.p0, fprime = fprime,
                args = args, bounds = self.free_parameters_boundaries, 
                approx_grad = approx_grad, **kwargs)[0]
            elif fitter == "l_bfgs_b":
                if bounded is True:
                    self.set_boundaries()
                elif bounded is False:
                    self.self.free_parameters_boundaries = None
                self.p0 = fmin_l_bfgs_b(tominimize, self.p0,
                    fprime=fprime, args=args, 
                    bounds=self.free_parameters_boundaries, 
                    approx_grad = approx_grad, **kwargs)[0]
            else:
                print("""
                The %s optimizer is not available.

                Available optimizers:
                Unconstrained:
                --------------
                Only least squares: leastsq and odr
                General: fmin, powell, cg, ncg, bfgs

                Constrained:
                ------------
                tnc and l_bfgs_b
                """ % fitter)
                
        
        if not np.iterable(self.p0):
            self.p0 = (self.p0,)
        self._charge_p0(p_std=self.p_std)
        self.set()
        if ext_bounding is True:
            self._disable_ext_bounding()
        if switch_aap is True and update_plot is False:
            self._connect_parameters2update_plot()
            self.update_plot()            
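A minimal usage sketch based on the docstring above (the model object and its data are assumed to exist; these calls are illustrative, not taken from the project's test suite): a bounded fit with mpfit, followed by a maximum-likelihood refinement with fmin, as the docstring recommends.

# Hypothetical usage of Model.fit, assuming `model` is an existing model
# with data attached.  mpfit is the only fitter here that honors bounds;
# fmin is the only one that supports maximum-likelihood estimation.
model.fit(fitter='mpfit', bounded=True)  # bounded least-squares fit
model.fit(fitter='fmin', method='ml')    # refine with maximum likelihood
print(model.p_std)  # parameter standard deviations, when the fitter provides them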
Example #4
    def fit(self, fitter=None, method='ls',
            grad=False, weights=None, ext_bounding=False, ascombe=True,
            update_plot=False, bounded=False, **kwargs):
        """Fits the model to the experimental data using the specified
        fitter.

        Note that the covariance matrix calculated by the 'leastsq' fitter
        is not always reliable.
        """
        if fitter is None:
            fitter = preferences.Model.default_fitter
            print('Fitter: %s' % fitter)
        switch_aap = (update_plot != self.auto_update_plot)
        if switch_aap is True:
            self.set_auto_update_plot(update_plot)
        self.p_std = None
        self._set_p0()
        if ext_bounding:
            self._enable_ext_bounding()
        if grad is False:
            approx_grad = True
            jacobian = None
            odr_jacobian = None
            grad_ml = None
            grad_ls = None
        else:
            approx_grad = False
            jacobian = self._jacobian
            odr_jacobian = self._jacobian4odr
            grad_ml = self._gradient_ml
            grad_ls = self._gradient_ls
        if method == 'ml':
            weights = None
        if weights is True:
            if self.spectrum.variance is None:
                self.spectrum.estimate_variance()
            weights = 1. / np.sqrt(
                self.spectrum.variance[self.axes_manager._getitem_tuple][
                    self.channel_switches])
        elif weights is not None:
            weights = weights[self.axes_manager._getitem_tuple][
                self.channel_switches]
        args = (self.spectrum()[self.channel_switches], weights)
        
        # Least squares "dedicated" fitters
        if fitter == "leastsq":
            output = \
            leastsq(self._errfunc, self.p0[:], Dfun = jacobian,
            col_deriv=1, args = args, full_output = True, **kwargs)
            
            self.p0 = output[0]
            var_matrix = output[1]
            # In Scipy 0.7 sometimes the variance matrix is None (maybe a 
            # bug?) so...
            if var_matrix is not None:
                self.p_std = np.sqrt(np.diag(var_matrix))
            self.fit_output = output
        
        elif fitter == "odr":
            modelo = odr.Model(fcn = self._function4odr, 
            fjacb = odr_jacobian)
            mydata = odr.RealData(self.axis.axis[self.channel_switches],
            self.spectrum()[self.channel_switches],
            sx = None,
            sy = (1/weights if weights is not None else None))
            myodr = odr.ODR(mydata, modelo, beta0=self.p0[:])
            myoutput = myodr.run()
            result = myoutput.beta
            self.p_std = myoutput.sd_beta
            self.p0 = result
            self.fit_output = myoutput
            
        elif fitter == 'mpfit':
            autoderivative = 1
            if grad is True:
                autoderivative = 0

            if bounded is True:
                self.set_mpfit_parameters_info()
            elif bounded is False:
                self.mpfit_parinfo = None
            m = mpfit(self._errfunc4mpfit, self.p0[:],
                      parinfo=self.mpfit_parinfo,
                      functkw={'y': self.spectrum()[self.channel_switches],
                               'weights': weights},
                      autoderivative=autoderivative, quiet=1)
            self.p0 = m.params
            self.p_std = m.perror
            self.fit_output = m
            
        else:
            # General optimizers (including constrained ones: tnc, l_bfgs_b)
            # Least squares or maximum likelihood
            if method == 'ml':
                tominimize = self._poisson_likelihood_function
                fprime = grad_ml
            elif method == 'ls':
                tominimize = self._errfunc2
                fprime = grad_ls
                        
            # OPTIMIZERS
            
            # Simple (don't use gradient)
            if fitter == "fmin" :
                self.p0 = fmin(tominimize, self.p0, args = args, **kwargs)
            elif fitter == "powell" :
                self.p0 = fmin_powell(tominimize, self.p0, args = args, 
                **kwargs)
            
            # Make use of the gradient
            elif fitter == "cg" :
                self.p0 = fmin_cg(tominimize, self.p0, fprime = fprime,
                args= args, **kwargs)
            elif fitter == "ncg" :
                self.p0 = fmin_ncg(tominimize, self.p0, fprime = fprime,
                args = args, **kwargs)
            elif fitter == "bfgs" :
                self.p0 = fmin_bfgs(tominimize, self.p0, fprime = fprime,
                args = args, **kwargs)
            
            # Constrained optimizers
            
            # Use gradient
            elif fitter == "tnc":
                if bounded is True:
                    self.set_boundaries()
                elif bounded is False:
                    self.self.free_parameters_boundaries = None
                self.p0 = fmin_tnc(tominimize, self.p0, fprime = fprime,
                args = args, bounds = self.free_parameters_boundaries, 
                approx_grad = approx_grad, **kwargs)[0]
            elif fitter == "l_bfgs_b":
                if bounded is True:
                    self.set_boundaries()
                elif bounded is False:
                    self.self.free_parameters_boundaries = None
                self.p0 = fmin_l_bfgs_b(tominimize, self.p0, fprime = fprime, 
                args =  args,  bounds = self.free_parameters_boundaries, 
                approx_grad = approx_grad, **kwargs)[0]
            else:
                print("""
                The %s optimizer is not available.

                Available optimizers:
                Unconstrained:
                --------------
                Only least squares: leastsq and odr
                General: fmin, powell, cg, ncg, bfgs

                Constrained:
                ------------
                tnc and l_bfgs_b
                """ % fitter)
                
        
        if not np.iterable(self.p0):
            self.p0 = (self.p0,)
        self._charge_p0(p_std=self.p_std)
        self.set()
        if ext_bounding is True:
            self._disable_ext_bounding()
        if switch_aap is True:
            self.set_auto_update_plot(not update_plot)
            if not update_plot and self.spectrum._plot is not None:
                self.update_plot()
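Both versions of fit delegate bound handling for mpfit to set_mpfit_parameters_info, which presumably fills self.mpfit_parinfo with mpfit's standard per-parameter dicts. A minimal sketch of that structure follows; the dict keys are mpfit's documented parinfo convention (also visible in Example #1), while the concrete values are illustrative only.

# Illustrative parinfo list in mpfit's documented format: each 'limited'
# pair flags whether the lower/upper value in 'limits' is enforced.
mpfit_parinfo = [
    {'value': 1.0, 'fixed': 0, 'limited': [1, 0], 'limits': [0.0, 0.0]},  # lower bound only
    {'value': 2.0, 'fixed': 0, 'limited': [1, 1], 'limits': [0.0, 5.0]},  # bounded in [0, 5]
    {'value': 3.0, 'fixed': 1, 'limited': [0, 0], 'limits': [0.0, 0.0]},  # held fixed
]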