Example #1
import numpy as np
import matplotlib.pyplot as plt
# `rebin` is HyperSpy's array rebinning helper (assumed import path):
from hyperspy.misc.array_tools import rebin


def _estimate_gain(ns, cs,
                   weighted=False,
                   higher_than=None,
                   plot_results=False,
                   binning=0,
                   pol_order=1):
    if binning > 0:
        # Crop the channel axis so its length is divisible by the
        # binning factor, then rebin both stacks.
        factor = 2 ** binning
        remainder = np.mod(ns.shape[1], factor)
        if remainder != 0:
            ns = ns[:, remainder:]
            cs = cs[:, remainder:]
        new_shape = (ns.shape[0], ns.shape[1] // factor)
        ns = rebin(ns, new_shape)
        cs = rebin(cs, new_shape)

    # Per-channel noise variance and mean counts over the repeated readouts.
    noise = ns - cs
    variance = np.var(noise, 0)
    average = np.mean(cs, 0).squeeze()

    # Use only the channels whose mean counts exceed `higher_than`.
    if higher_than is not None:
        sorting_index_array = np.argsort(average)
        average_sorted = average[sorting_index_array]
        average_higher_than = average_sorted > higher_than
        variance_sorted = variance.squeeze()[sorting_index_array]
        variance2fit = variance_sorted[average_higher_than]
        average2fit = average_sorted[average_higher_than]
    else:
        variance2fit = variance
        average2fit = average

    fit = np.polyfit(average2fit, variance2fit, pol_order)
    if weighted:
        # Refine the plain polynomial fit with a weighted linear fit
        # through HyperSpy's model machinery.
        from hyperspy._signals.spectrum import Spectrum
        from hyperspy.model import Model
        from hyperspy.components import Line
        s = Spectrum(variance2fit)
        s.axes_manager.signal_axes[0].axis = average2fit
        m = Model(s)
        line = Line()
        line.a.value = fit[1]
        line.b.value = fit[0]
        m.append(line)
        m.fit(weights=True)
        fit[0] = line.b.value
        fit[1] = line.a.value

    if plot_results:
        plt.figure()
        plt.scatter(average.squeeze(), variance.squeeze())
        plt.xlabel('Counts')
        plt.ylabel('Variance')
        plt.plot(average2fit, np.polyval(fit, average2fit), color='red')
    results = {'fit': fit, 'variance': variance.squeeze(),
               'counts': average.squeeze()}

    return results
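
A minimal usage sketch with synthetic data (all names and values below are illustrative, not from the source): for Poisson-limited counts scaled by a detector gain g, the noise variance equals g times the counts, so with pol_order=1 the leading coefficient of the returned fit recovers the gain.

rng = np.random.default_rng(0)
true_gain = 2.5
clean = np.tile(np.linspace(100.0, 1000.0, 256), (50, 1))  # 50 readouts x 256 channels
noisy = true_gain * rng.poisson(clean / true_gain)         # Var = gain * counts
out = _estimate_gain(noisy, clean, pol_order=1)
print(out['fit'][0])  # slope of variance vs. counts, approximately true_gain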
Example #2
    def fit(
        self,
        fitter=None,
        method="ls",
        grad=False,
        bounded=False,
        ext_bounding=False,
        update_plot=False,
        kind="std",
        **kwargs
    ):
        """Fits the model to the experimental data

        Parameters
        ----------
        fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
            The optimizer used to perform the fitting. If None, the
            fitter defined in the Preferences is used. leastsq is the
            most stable but does not support bounding. mpfit supports
            bounding. fmin is the only one that supports maximum
            likelihood estimation, but it is less robust than the
            Levenberg–Marquardt based leastsq and mpfit, so it is best
            used after one of them to refine the estimation.
        method : {'ls', 'ml'}
            Choose 'ls' (default) for least squares and 'ml' for
            maximum-likelihood estimation. The latter only works with
            fitter = 'fmin'.
        grad : bool
            If True, the analytical gradient is used if defined to
            speed up the estimation.
        ext_bounding : bool
            If True, enforce bounding by keeping the value of the
            parameters constant out of the defined bounding area.
        bounded : bool
            If True, performs bounded optimization if the fitter
            supports it. Currently only mpfit supports bounding.
        update_plot : bool
            If True, the plot is updated during the optimization
            process. This slows down the optimization but makes it
            possible to visualize its evolution.
        kind : {'std', 'smart'}
            If 'std' (default), performs a standard fit. If 'smart',
            performs a smart_fit.

        **kwargs : keyword arguments
            Any extra keyword argument will be passed to the chosen
            fitter.

        See Also
        --------
        multifit, smart_fit

        """
        if kind == "smart":
            self.smart_fit(
                fitter=fitter,
                method=method,
                grad=grad,
                bounded=bounded,
                ext_bounding=ext_bounding,
                update_plot=update_plot,
                **kwargs
            )
        elif kind == "std":
            Model.fit(
                self,
                fitter=fitter,
                method=method,
                grad=grad,
                bounded=bounded,
                ext_bounding=ext_bounding,
                update_plot=update_plot,
                **kwargs
            )
        else:
            raise ValueError("kind must be either 'std' or 'smart'." "'%s' provided." % kind)
Example #3
 def fit(self, *args, **kwargs):
     if 'kind' in kwargs and kwargs['kind'] == 'smart':
         self.smart_fit(*args, **kwargs)
     else:
         Model.fit(self, *args, **kwargs)
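
One subtlety in this thin wrapper: the 'kind' key stays inside **kwargs, so it is forwarded to smart_fit, and to Model.fit whenever it is present with any value other than 'smart'. A slightly more defensive sketch of the same dispatch (my variant, not from the source) pops the key before delegating:

def fit(self, *args, **kwargs):
    # Remove 'kind' before forwarding so neither target receives an
    # unexpected keyword; default to the standard fit.
    if kwargs.pop('kind', 'std') == 'smart':
        self.smart_fit(*args, **kwargs)
    else:
        Model.fit(self, *args, **kwargs)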
Example #4
    def fit(self,
            fitter=None,
            method='ls',
            grad=False,
            bounded=False,
            ext_bounding=False,
            update_plot=False,
            kind='std',
            **kwargs):
        """Fits the model to the experimental data

        Parameters
        ----------
        fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
            The optimizer used to perform the fitting. If None, the
            fitter defined in the Preferences is used. leastsq is the
            most stable but does not support bounding. mpfit supports
            bounding. fmin is the only one that supports maximum
            likelihood estimation, but it is less robust than the
            Levenberg–Marquardt based leastsq and mpfit, so it is best
            used after one of them to refine the estimation.
        method : {'ls', 'ml'}
            Choose 'ls' (default) for least squares and 'ml' for
            maximum-likelihood estimation. The latter only works with
            fitter = 'fmin'.
        grad : bool
            If True, the analytical gradient is used if defined to
            speed up the estimation.
        ext_bounding : bool
            If True, enforce bounding by keeping the value of the
            parameters constant out of the defined bounding area.
        bounded : bool
            If True, performs bounded optimization if the fitter
            supports it. Currently only mpfit supports bounding.
        update_plot : bool
            If True, the plot is updated during the optimization
            process. This slows down the optimization but makes it
            possible to visualize its evolution.
        kind : {'std', 'smart'}
            If 'std' (default), performs a standard fit. If 'smart',
            performs a smart_fit.

        **kwargs : keyword arguments
            Any extra keyword argument will be passed to the chosen
            fitter.

        See Also
        --------
        multifit, smart_fit

        """
        if kind == 'smart':
            self.smart_fit(fitter=fitter,
                           method=method,
                           grad=grad,
                           bounded=bounded,
                           ext_bounding=ext_bounding,
                           update_plot=update_plot,
                           **kwargs)
        elif kind == 'std':
            Model.fit(self,
                      fitter=fitter,
                      method=method,
                      grad=grad,
                      bounded=bounded,
                      ext_bounding=ext_bounding,
                      update_plot=update_plot,
                      **kwargs)
        else:
            raise ValueError("kind must be either 'std' or 'smart'. "
                             "'%s' provided." % kind)
Example #5
import numpy as np
import matplotlib.pyplot as plt
# `rebin` is HyperSpy's array rebinning helper (assumed import path):
from hyperspy.misc.array_tools import rebin


def _estimate_gain(ns,
                   cs,
                   weighted=False,
                   higher_than=None,
                   plot_results=False,
                   binning=0,
                   pol_order=1):
    if binning > 0:
        # Crop the channel axis so its length is divisible by the
        # binning factor, then rebin both stacks.
        factor = 2**binning
        remainder = np.mod(ns.shape[1], factor)
        if remainder != 0:
            ns = ns[:, remainder:]
            cs = cs[:, remainder:]
        new_shape = (ns.shape[0], ns.shape[1] // factor)
        ns = rebin(ns, new_shape)
        cs = rebin(cs, new_shape)

    # Per-channel noise variance and mean counts over the repeated readouts.
    noise = ns - cs
    variance = np.var(noise, 0)
    average = np.mean(cs, 0).squeeze()

    # Use only the channels whose mean counts exceed `higher_than`.
    if higher_than is not None:
        sorting_index_array = np.argsort(average)
        average_sorted = average[sorting_index_array]
        average_higher_than = average_sorted > higher_than
        variance_sorted = variance.squeeze()[sorting_index_array]
        variance2fit = variance_sorted[average_higher_than]
        average2fit = average_sorted[average_higher_than]
    else:
        variance2fit = variance
        average2fit = average

    fit = np.polyfit(average2fit, variance2fit, pol_order)
    if weighted:
        # Refine the plain polynomial fit with a weighted linear fit
        # through HyperSpy's model machinery.
        from hyperspy._signals.spectrum import Spectrum
        from hyperspy.model import Model
        from hyperspy.components import Line
        s = Spectrum(variance2fit)
        s.axes_manager.signal_axes[0].axis = average2fit
        m = Model(s)
        line = Line()
        line.a.value = fit[1]
        line.b.value = fit[0]
        m.append(line)
        m.fit(weights=True)
        fit[0] = line.b.value
        fit[1] = line.a.value

    if plot_results:
        plt.figure()
        plt.scatter(average.squeeze(), variance.squeeze())
        plt.xlabel('Counts')
        plt.ylabel('Variance')
        plt.plot(average2fit, np.polyval(fit, average2fit), color='red')
    results = {
        'fit': fit,
        'variance': variance.squeeze(),
        'counts': average.squeeze()
    }

    return results
Example #6
 def fit(self, fitter=None, method='ls', grad=False, weights=None,
         bounded=False, ext_bounding=False, update_plot=False, 
         kind='std', **kwargs):
     """Fits the model to the experimental data
     
     Parameters
     ----------
     fitter : {None, "leastsq", "odr", "mpfit", "fmin"}
         The optimizer used to perform the fitting. If None, the
         fitter defined in the Preferences is used. leastsq is the
         most stable but does not support bounding. mpfit supports
         bounding. fmin is the only one that supports maximum
         likelihood estimation, but it is less robust than the
         Levenberg–Marquardt based leastsq and mpfit, so it is best
         used after one of them to refine the estimation.
     method : {'ls', 'ml'}
         Choose 'ls' (default) for least squares and 'ml' for 
         maximum-likelihood estimation. The latter only works with 
         fitter = 'fmin'.
     grad : bool
         If True, the analytical gradient is used if defined to 
         speed up the estimation. 
     weights : {None, True, numpy.array}
         If None, performs standard least squares. If True, performs
         weighted least squares where the weights are calculated
         using spectrum.Spectrum.estimate_poissonian_noise_variance.
         Alternatively, external weights can be supplied by passing
         a weights array of the same dimensions as the signal.
     ext_bounding : bool
         If True, enforce bounding by keeping the value of the 
         parameters constant out of the defined bounding area.
     bounded : bool
         If True, performs bounded optimization if the fitter
         supports it. Currently only mpfit supports bounding.
     update_plot : bool
         If True, the plot is updated during the optimization
         process. This slows down the optimization but makes it
         possible to visualize its evolution.
     kind : {'std', 'smart'}
         If 'std' (default), performs a standard fit. If 'smart',
         performs a smart_fit.
     
     **kwargs : keyword arguments
         Any extra keyword argument will be passed to the chosen
         fitter.
         
     See Also
     --------
     multifit, smart_fit
         
     """
     if kind == 'smart':
         self.smart_fit(fitter=fitter,
                        method=method,
                        grad=grad,
                        weights=weights,
                        bounded=bounded,
                        ext_bounding=ext_bounding,
                        update_plot=update_plot,
                        **kwargs)
     elif kind == 'std':
         Model.fit(self,
                   fitter=fitter,
                   method=method,
                   grad=grad,
                   weights=weights,
                   bounded=bounded,
                   ext_bounding=ext_bounding,
                   update_plot=update_plot,
                   **kwargs)
     else:
         raise ValueError("kind must be either 'std' or 'smart'. "
                          "'%s' provided." % kind)
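
A hypothetical call sketch for this weighted variant (m is an instance of the defining model subclass; my_weights is a placeholder array with the same dimensions as the signal):

m.fit(weights=True)                  # weights from estimate_poissonian_noise_variance
m.fit(weights=my_weights)            # externally supplied weights
m.fit(kind='smart', weights=True)    # weighted smart fit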
Example #7
 def fit(self, *args, **kwargs):
     if "kind" in kwargs and kwargs["kind"] == "smart":
         self.smart_fit(*args, **kwargs)
     else:
         Model.fit(self, *args, **kwargs)