Example #1
    def start(self, **kwargs):
        """Starts SAMFire.

        Parameters
        ----------
        **kwargs : keyword arguments
            Any keyword arguments to be passed to the Model.fit() call.
        """
        self._setup()
        if self._workers and self.pool is not None:
            self.pool.update_parameters()
        self._args = kwargs
        num_of_strat = len(self.strategies)
        total_size = self.model.axes_manager.navigation_size - self.pixels_done
        self._progressbar = progressbar(total=total_size)
        try:
            while True:
                self._run_active_strategy()
                self.plot()
                if self.pixels_done == self.model.axes_manager.navigation_size:
                    # all pixels are done, no need to go to the next strategy
                    break
                if self._active_strategy_ind == num_of_strat - 1:
                    # last one just finished running
                    break
                self.change_strategy(self._active_strategy_ind + 1)
        except KeyboardInterrupt:
            if self.pool is not None:
                _logger.warning(
                    'Collecting already started pixels, please wait')
                self.pool.collect_results()
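The control flow above amounts to a small state machine: run the active strategy, then either stop (all pixels fitted, or no strategy left) or advance to the next strategy. A minimal, self-contained sketch of the same pattern, using hypothetical names rather than the HyperSpy API:

def run_strategies(strategies, pixels_total, run_strategy):
    """Run each strategy in turn until every pixel is fitted."""
    active = 0
    while True:
        pixels_done = run_strategy(strategies[active])
        if pixels_done == pixels_total:
            break  # all pixels are done, no need for the next strategy
        if active == len(strategies) - 1:
            break  # the last strategy just finished running
        active += 1
    return pixels_done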
Example #2
    def project(self, X, return_error=False):
        """Project the learnt components on the data.

        Parameters
        ----------
        X : {numpy.ndarray, iterator}
            [n_samples x n_features] matrix of observations
            or an iterator that yields n_samples, each with n_features elements.
        return_error : bool
            If True, also returns the sparse error matrix. Otherwise, returns
            only the weights (loadings).

        """
        H = []
        if return_error:
            E = []

        num = None
        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)
        for v in progressbar(X, leave=False, total=num):
            h, e = _solveproj(v, self.W, self.lambda1, self.kappa, vmax=np.inf)
            H.append(h.copy())
            if return_error:
                E.append(e.copy())

        H = np.stack(H, axis=-1)
        if return_error:
            return H, np.stack(E, axis=-1)
        else:
            return H
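The ndarray-or-iterator normalization at the top of project() is a reusable pattern: take the row count as the progress-bar total when it is known, and fall back to an indeterminate bar otherwise. A minimal sketch, assuming tqdm as the progress-bar backend (the progressbar used above appears to be a library-specific wrapper):

import numpy as np
from tqdm import tqdm

def iter_samples(X):
    """Yield samples from an ndarray or any iterator, with a total when known."""
    total = None
    if isinstance(X, np.ndarray):
        total = X.shape[0]  # one sample per row
        X = iter(X)
    yield from tqdm(X, total=total, leave=False)

for v in iter_samples(np.random.rand(5, 3)):
    pass  # process one length-3 sample at a time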
Example #3
 def project(self, X):
     num = None
     if isinstance(X, np.ndarray):
         num = X.shape[0]
         X = iter(X)
     for v in progressbar(X, leave=False, total=num):
         r, _ = _solveproj(v, self.L, self.I, self.lambda2)
         self.R.append(r.copy())
Example #4
    def fit(self, X, iterating=None):
        if self.nfeatures is None:
            X = self._setup(X)

        if iterating is None:
            iterating = self.iterating
        else:
            self.iterating = iterating
        num = None
        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)

        for z in progressbar(X, leave=False, total=num):
            if not self.t or not (self.t + 1) % 10:
                _logger.info("Processing sample : %s" % (self.t + 1))

            # TODO: what about z.min()?
            thislambda2 = self.lambda2  # * z.max()
            thislambda1 = self.lambda1  # * z.max()

            r, e = _solveproj(z, self.L, self.I, thislambda2)

            self.R.append(r)
            if not iterating:
                self.E.append(e)

            if self.method == 'CF':
                # Closed-form solution
                self.A += np.outer(r, r.T)
                self.B += np.outer((z - e), r.T)
                self.L = np.dot(self.B, scipy.linalg.inv(self.A + self.I))
            elif self.method == 'BCD':
                # Block-coordinate descent
                self.A += np.outer(r, r.T)
                self.B += np.outer((z - e), r.T)
                self.L = _updatecol(self.L, self.A, self.B, self.I)
            elif self.method == 'SGD':
                # Stochastic gradient descent
                learn = self.learning_rate * (1 + self.learning_rate *
                                              thislambda1 * self.t)
                self.L -= (np.dot(self.L, np.outer(r, r.T))
                           - np.outer((z - e), r.T)
                           + thislambda1 * self.L) / learn
            elif self.method == 'MomentumSGD':
                # Stochastic gradient descent with momentum
                learn = self.learning_rate * (1 + self.learning_rate *
                                              thislambda1 * self.t)
                vold = self.momentum * self.vnew
                self.vnew = (np.dot(self.L, np.outer(r, r.T))
                             - np.outer((z - e), r.T)
                             + thislambda1 * self.L) / learn
                self.L -= (vold + self.vnew)
            self.t += 1
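Among the four update rules, the closed-form ('CF') branch is the easiest to isolate: it accumulates the sufficient statistics A and B, then solves a regularized least-squares problem for the dictionary L. A runnable toy sketch of just that branch; the coefficient step is a crude stand-in for the internal _solveproj, and I plays the role of the regularizer self.I above:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
n_features, rank = 8, 3
A = np.zeros((rank, rank))
B = np.zeros((n_features, rank))
L = rng.normal(size=(n_features, rank))
I = np.eye(rank)  # regularizer, standing in for self.I

for z in rng.normal(size=(20, n_features)):
    r = L.T @ z                        # crude stand-in for _solveproj
    A += np.outer(r, r)                # accumulate sufficient statistics
    B += np.outer(z, r)
    L = B @ scipy.linalg.inv(A + I)    # closed-form dictionary update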
Example #5
    def fit(self, X, batch_size=None):
        """Learn NMF components from the data.

        Parameters
        ----------
        X : {numpy.ndarray, iterator}
            [nsamples x nfeatures] matrix of observations
            or an iterator that yields samples, each with nfeatures elements.
        batch_size : {None, int}
            If not None, learn the data in batches, each of batch_size samples
            or less.
        """
        if self.nfeatures is None:
            X = self._setup(X)

        num = None
        prod = np.outer
        if batch_size is not None:
            if not isinstance(X, np.ndarray):
                raise ValueError("can't batch iterating data")
            else:
                prod = np.dot
                length = X.shape[0]
                num = max(length // batch_size, 1)
                X = np.array_split(X, num, axis=0)

        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)

        r, h = self.r, self.h

        for v in progressbar(X, leave=False, total=num, disable=num == 1):
            h, r = _solveproj(v, self.W, self.lambda1, self.kappa, r=r, h=h)
            self.v = v
            self.r = r
            self.h = h
            self.H.append(h)
            if self.R is not None:
                self.R.append(r)

            # Only need to update A, B when not tracking subspace
            if not self.subspace_tracking:
                self.A += prod(h, h.T)
                self.B += prod((v.T - r), h.T)

            self._solve_W()
            self.t += 1
        self.r = r
        self.h = h
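The batching logic above splits the array into roughly equal chunks with np.array_split and switches the rank-1 update np.outer for the batched np.dot, since h and (v.T - r) become matrices. A small sketch of that mechanism alone:

import numpy as np

X = np.arange(20.0).reshape(10, 2)
batch_size = 4
num = max(X.shape[0] // batch_size, 1)   # 2 batches here
for v in np.array_split(X, num, axis=0):
    print(v.shape)  # (5, 2) twice: outer products become np.dot products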
Example #6
def find_features_by_separation(
    signal,
    separation_range,
    separation_step=1,
    threshold_rel=0.02,
    pca=False,
    subtract_background=False,
    normalize_intensity=False,
    show_progressbar=True,
):
    """
    Do peak finding while varying the minimum peak separation
    constraint.

    Inspiration from the program Smart Align by Lewys Jones.

    Parameters
    ----------
    signal : HyperSpy 2D signal
    separation_range : tuple
        Lower and upper end of the minimum pixel distance between
        features. The upper end is exclusive, as with range.
    separation_step : int, optional
    show_progressbar : bool, default True

    Returns
    -------
    tuple, (separation_list, peak_list)

    """
    separation_list = range(separation_range[0], separation_range[1],
                            separation_step)

    separation_value_list = []
    peak_list = []
    for separation in progressbar(separation_list,
                                  disable=not show_progressbar):
        peaks = get_atom_positions(signal,
                                   separation=separation,
                                   threshold_rel=threshold_rel,
                                   pca=pca,
                                   normalize_intensity=normalize_intensity,
                                   subtract_background=subtract_background)

        separation_value_list.append(separation)
        peak_list.append(peaks)

    return (separation_value_list, peak_list)
Example #7
    def fit(self, X, batch_size=None):
        """Learn NMF components from the data.

        Parameters
        ----------
        X : {numpy.ndarray, iterator}
            [n_samples x n_features] matrix of observations
            or an iterator that yields samples, each with n_features elements.
        batch_size : {None, int}
            If not None, learn the data in batches, each of batch_size samples
            or less.

        """
        if self.n_features is None:
            X = self._setup(X)

        num = None
        prod = np.outer
        if batch_size is not None:
            if not isinstance(X, np.ndarray):
                raise ValueError("can't batch iterating data")
            else:
                prod = np.dot
                length = X.shape[0]
                num = max(length // batch_size, 1)
                X = np.array_split(X, num, axis=0)

        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)

        h, e = self.h, self.e

        for v in progressbar(X, leave=False, total=num, disable=num == 1):
            h, e = _solveproj(v, self.W, self.lambda1, self.kappa, h=h, e=e)
            self.v = v
            self.e = e
            self.h = h
            self.H.append(h)
            if self.E is not None:
                self.E.append(e)

            self._solve_W(prod(h, h.T), prod((v.T - e), h.T))
            self.t += 1

        self.h = h
        self.e = e
Example #8
 def interpolate_in_between(self,
                            start,
                            end,
                            delta=3,
                            show_progressbar=None,
                            **kwargs):
     """Replace the data in a given range by interpolation.
     The operation is performed in place.
     Parameters
     ----------
     start, end : {int | float}
         The limits of the interval. If int they are taken as the
         axis index. If float they are taken as the axis value.
     delta : {int | float}
         The windows around the (start, end) to use for interpolation
     show_progressbar : None or bool
         If True, display a progress bar. If None the default is set in
         `preferences`.
     All extra keyword arguments are passed to
     scipy.interpolate.interp1d. See the function documentation
     for details.
     Raises
     ------
     SignalDimensionError if the signal dimension is not 1.
     """
     if show_progressbar is None:
         show_progressbar = preferences.General.show_progressbar
     self._check_signal_dimension_equals_one()
     axis = self.axes_manager.signal_axes[0]
     i1 = axis._get_index(start)
     i2 = axis._get_index(end)
     if isinstance(delta, float):
         delta = int(delta / axis.scale)
     i0 = int(np.clip(i1 - delta, 0, np.inf))
     i3 = int(np.clip(i2 + delta, 0, axis.size))
     with progressbar(total=self.axes_manager.navigation_size,
                      disable=not show_progressbar,
                      leave=True) as pbar:
         for i, dat in enumerate(self._iterate_signal()):
             dat_int = sp.interpolate.interp1d(
                 list(range(i0, i1)) + list(range(i2, i3)),
                 dat[i0:i1].tolist() + dat[i2:i3].tolist(), **kwargs)
             dat[i1:i2] = dat_int(list(range(i1, i2)))
             pbar.update(1)
     self.events.data_changed.trigger(obj=self)
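Stripped of the signal machinery, each navigation step fits scipy.interpolate.interp1d on the points flanking the gap and evaluates it inside. A standalone sketch of one such step on a toy 1-D array:

import numpy as np
import scipy.interpolate

dat = np.sin(np.linspace(0, np.pi, 50))
i1, i2, delta = 20, 30, 3                      # gap [i1, i2), window of `delta`
i0, i3 = max(i1 - delta, 0), min(i2 + delta, dat.size)
x = list(range(i0, i1)) + list(range(i2, i3))  # points flanking the gap
f = scipy.interpolate.interp1d(x, dat[x])
dat[i1:i2] = f(np.arange(i1, i2))              # fill the gap in place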
Example #9
    def refine_position_gaussian(self, image=None, show_progressbar=True,
                                 percent_to_nn=0.40, mask_radius=None):
        """Fit several atoms at the same time.

        For datasets where the atoms are too close together to do the fitting
        individually.

        Parameters
        ----------
        image : NumPy 2D array, optional
        show_progressbar : bool, default True
        percent_to_nn : scalar, default 0.4
        mask_radius : float, optional
            Radius of the mask around each atom. If this is not set,
            the radius will be the distance to the nearest atom in the
            same sublattice times the `percent_to_nn` value.
            Note: if `mask_radius` is not specified, the Atom_Position objects
            must have a populated nearest_neighbor_list. This is normally done
            through the sublattice class, but can also be done manually.

        Examples
        --------
        >>> dl = am.dummy_data.get_dumbbell_heterostructure_dumbbell_lattice()
        >>> dl.refine_position_gaussian(show_progressbar=False)

        """
        if image is None:
            if self.original_image is None:
                image = self.image
            else:
                image = self.original_image
        n_tot = len(self.sublattice_list[0].atom_list)
        for i_atom in progressbar(range(n_tot), desc="Gaussian fitting",
                                  disable=not show_progressbar):
            atom_list = []
            for sublattice in self.sublattice_list:
                atom_list.append(sublattice.atom_list[i_atom])
            afr.fit_atom_positions_gaussian(
                    atom_list, image, percent_to_nn=percent_to_nn,
                    mask_radius=mask_radius)
Example #10
    def fit(self, X, batch_size=None):
        """Learn NMF components from the data.

        Parameters
        ----------
        X : {numpy.ndarray, iterator}
            [nsamples x nfeatures] matrix of observations
            or an iterator that yields samples, each with nfeatures elements.
        batch_size : {None, int}
            If not None, learn the data in batches, each of batch_size samples
            or less.
        """
        if self.nfeatures is None:
            X = self._setup(X)

        num = None
        prod = np.outer
        if batch_size is not None:
            if not isinstance(X, np.ndarray):
                raise ValueError("can't batch iterating data")
            else:
                prod = np.dot
                length = X.shape[0]
                num = max(length // batch_size, 1)
                X = np.array_split(X, num, axis=0)
        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)
        r, h = self.r, self.h
        for v in progressbar(X, leave=False, total=num, disable=num == 1):
            h, r = _solveproj(v, self.W, self.lambda1, self.kappa, r=r, h=h)
            self.H.append(h)
            if self.R is not None:
                self.R.append(r)

            self.A += prod(h, h.T)
            self.B += prod((v.T - r), h.T)
            self._solve_W()
        self.r = r
        self.h = h
Example #11
    def project(self, X, return_R=False):
        """Project the learnt components on the data.

        Parameters
        ----------
        X : {numpy.ndarray, iterator}
            [nsamples x nfeatures] matrix of observations
            or an iterator that yields samples, each with nfeatures elements.
        return_R : bool
            If True, also returns the sparse error matrix. Otherwise, returns
            only the weights (loadings).
        """
        H = []
        if return_R:
            R = []

        num = None
        W = self.W
        lam1 = self.lambda1
        kap = self.kappa
        if isinstance(X, np.ndarray):
            num = X.shape[0]
            X = iter(X)
        for v in progressbar(X, leave=False, total=num):
            # want to start with fresh results and not clip, so that chunks are
            # smooth
            h, r = _solveproj(v, W, lam1, kap, vmax=np.inf)
            H.append(h.copy())
            if return_R:
                R.append(r.copy())

        H = np.stack(H, axis=-1)
        if return_R:
            return H, np.stack(R, axis=-1)
        else:
            return H
Example #12
def _get_dumbbell_arrays(
        s, dumbbell_positions, dumbbell_vector, show_progressbar=True):
    """
    Parameters
    ----------
    s : HyperSpy 2D signal
    dumbbell_positions : list of atomic positions
        In the form [[x0, y0], [x1, y1], [x2, y2], ...]
    dumbbell_vector : tuple
    show_progressbar : bool, default True

    Returns
    -------
    Dumbbell lists : tuple of lists

    Examples
    --------
    >>> import atomap.initial_position_finding as ipf
    >>> s = am.dummy_data.get_dumbbell_signal()
    >>> dumbbell_positions = am.get_atom_positions(s, separation=16)
    >>> atom_positions = am.get_atom_positions(s, separation=4)
    >>> dumbbell_vector = ipf.find_dumbbell_vector(atom_positions)
    >>> d0, d1 = ipf._get_dumbbell_arrays(s, dumbbell_positions,
    ...                                   dumbbell_vector)

    """
    next_pos_list0 = []
    next_pos_list1 = []
    for x, y in zip(dumbbell_positions[:, 0], dumbbell_positions[:, 1]):
        next_pos_list0.append([dumbbell_vector[0]+x, dumbbell_vector[1]+y])
        next_pos_list1.append([-dumbbell_vector[0]+x, -dumbbell_vector[1]+y])
    next_pos_list0 = np.array(next_pos_list0)
    next_pos_list1 = np.array(next_pos_list1)

    mask_radius = 0.5*(dumbbell_vector[0]**2+dumbbell_vector[1]**2)**0.5

    iterator = zip(
            dumbbell_positions[:, 0], dumbbell_positions[:, 1],
            next_pos_list0, next_pos_list1)
    total_num = len(next_pos_list0)
    dumbbell_list0, dumbbell_list1 = [], []
    for x, y, next_pos0, next_pos1 in progressbar(
            iterator, total=total_num, desc="Finding dumbbells",
            disable=not show_progressbar):
        mask1 = _make_circular_mask(
                next_pos0[1], next_pos0[0],
                s.data.shape[0], s.data.shape[1],
                mask_radius)
        mask2 = _make_circular_mask(
                next_pos1[1], next_pos1[0],
                s.data.shape[0], s.data.shape[1],
                mask_radius)
        pos1_sum = (s.data*mask1).sum()
        pos2_sum = (s.data*mask2).sum()
        if pos1_sum > pos2_sum:
            dumbbell_list0.append([x, y])
            dumbbell_list1.append(next_pos0)
        else:
            dumbbell_list0.append(next_pos1)
            dumbbell_list1.append([x, y])
    dumbbell_list0 = np.array(dumbbell_list0)
    dumbbell_list1 = np.array(dumbbell_list1)
    return (dumbbell_list0, dumbbell_list1)
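The helper _make_circular_mask is internal to Atomap; below is a minimal stand-in with what appears to be the same (centre_y, centre_x, image_height, image_width, radius) argument order, useful for following the intensity comparison above:

import numpy as np

def circular_mask(cy, cx, ny, nx, radius):
    """Boolean mask that is True inside a circle of the given radius."""
    y, x = np.ogrid[:ny, :nx]
    return (y - cy) ** 2 + (x - cx) ** 2 <= radius ** 2

image = np.random.rand(16, 16)
mask = circular_mask(8.0, 8.0, *image.shape, radius=4.0)
print((image * mask).sum())  # integrated intensity inside the circle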
Example #13
    def model_zero_loss_peak_tail(self,
                                  signal_range,
                                  show_progressbar=None,
                                  *args,
                                  **kwargs):
        '''
        Model the zero-loss peak tail using a (power-law) model fit. The fit
        window is set using the signal_range tuple in axis units (not indices).
        Spectral intensity at energies above the signal_range is substituted by
        the model tail. The fit is performed using `remove_background`;
        *args and **kwargs are passed to that method.

        Parameters
        ----------
        signal_range : tuple
         Initial and final position of the fit window. Given in axis units. The
         components can be single or multidimensional. If multidimensional, an
         array or a signal with the same dimensions as the navigation dimension
         should be used.

        Returns
        -------
        zlp : EELSSignal
         Modeled ZLP tail.

        Examples
        --------
        >>> s = hs.load('some_eels.h5')
        >>> zlp = s.model_zero_loss_peak_tail((0.5, 1.),
        ...                                   fast=False,
        ...                                   show_progressbar=False)
        '''
        self._check_signal_dimension_equals_one()
        if not isinstance(signal_range, tuple):
            raise AttributeError('signal_range not recognized: '
                                 'must be a tuple!')

        if len(signal_range) != 2:
            raise AttributeError('signal_range not recognized, '
                                 'must be len = 2')

        axis = self.axes_manager.signal_axes[0]

        if isinstance(signal_range[0], Number) and (isinstance(
                signal_range[1], Number)):
            zlp = self - self.remove_background(
                signal_range,
                show_progressbar=show_progressbar,
                *args,
                **kwargs)
            I2 = axis.value2index(signal_range[1])
            ids = (slice(None), ) * axis.index_in_array + (slice(None,
                                                                 I2), Ellipsis)
            zlp.data[ids] = self.data[ids]
            return zlp

        if isinstance(signal_range[0],
                      (np.ndarray, hs.signals.BaseSignal)) or (isinstance(
                          signal_range[1],
                          (np.ndarray, hs.signals.BaseSignal))):
            signal_range_ini = self._check_adapt_map_input(signal_range[0])
            signal_range_fin = self._check_adapt_map_input(signal_range[1])
            for name in ['signal_range_ini', 'signal_range_fin']:
                parameter = eval(name)
                if isinstance(parameter, ValueError):
                    parameter.args = (parameter.args[0] + name, )
                    raise parameter
            zlp = self.deepcopy()

            for si in progressbar(self, disable=not show_progressbar):
                indices = self.axes_manager.indices
                E1 = signal_range_ini.inav[indices].data[0]
                E2 = signal_range_fin.inav[indices].data[0]
                ri = si.remove_background((E1, E2),
                                          show_progressbar=False,
                                          *args,
                                          **kwargs)
                I2 = axis.value2index(E2)
                ids = (*indices, slice(I2, None), Ellipsis)
                zlp.data[ids] = si.data[I2:] - ri.data[I2:]
            return zlp
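The index gymnastics `(slice(None),) * axis.index_in_array + (slice(None, I2), Ellipsis)` builds a slice tuple that cuts only along the signal axis, wherever it sits in the array. A toy demonstration:

import numpy as np

data = np.zeros((4, 5, 100))   # two navigation axes plus one signal axis
index_in_array = 2             # position of the signal axis in the array
I2 = 40
ids = (slice(None),) * index_in_array + (slice(None, I2), Ellipsis)
data[ids] = 1.0                # same as data[:, :, :40] = 1.0
print(data[..., :I2].all(), data[..., I2:].any())  # True False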
Example #14
    def relativistic_kramers_kronig(self,
                                    zlp=None,
                                    n=None,
                                    t=None,
                                    delta=0.9,
                                    fsmooth=None,
                                    iterations=20,
                                    chi2_target=1e-4,
                                    average=False,
                                    full_output=True,
                                    show_progressbar=None,
                                    *args,
                                    **kwargs):
        r"""Calculate the complex dielectric function from a single scattering
        distribution (SSD) using the Kramers-Kronig relations and a relativistic
        correction for thin slab geometry.

        The input SSD should be an EELSSpectrum instance, containing only
        inelastic scattering information (elastic and plural scattering
        deconvolved). The dielectric information is obtained by normalization of
        the inelastic scattering using the elastic scattering intensity and
        either refractive index or thickness information.

        A full complex dielectric function (CDF) is obtained by Kramers-Kronig
        transform, solved using FFT as in `kramers_kronig_analysis`. This initial
        guess for the CDF is improved in an iterative loop, devised to
        approximately subtract the relativistic contribution supposing an
        unoxidized planar surface.

        The loop runs until a chi-square target has been achieved or for a
        maximum number of iterations. This behavior can be modified using the
        parameters below. This method does not account for instrumental and
        finite-size effects.

        Note: either the refractive index or the thickness (`n` or `t`) is
        required. If both are None, or if both are provided, an exception is
        raised. Many
        input types are accepted for zlp, n and t parameters, which are parsed
        using `self._check_adapt_map_input`, see the documentation therein for
        more information.

        Parameters
        ----------
        zlp: {None, number, ndarray, Signal}
            ZLP intensity. It is optional (can be None) if t is given,
            full_output is False and no iterations are run. In any other case,
            the ZLP is required either to perform the normalization step,
            to calculate the thickness and/or to calculate the relativistic
            correction.
        n: {None, number, ndarray, Signal}
            The medium refractive index. Used for normalization of the
            SSD to obtain the energy loss function. If given the thickness
            is estimated and returned. It is only required when `t` is None.
        t: {None, number, ndarray, Signal}
            The sample thickness in nm. Used for normalization of the
            SSD to obtain the energy loss function. It is only required when
            `n` is None.
        delta : {None, float}
            Optionally apply a fractional limit to the relativistic correction
            in order to improve stability. Can be None, if no limit is desired.
            A value of around 0.9 ensures the correction is never larger than
            the original EELS signal, which would produce a negative spectral
            region.
        fsmooth : {None, float}
            Optionally apply a gaussian filter to the relativistic correction
            in order to eliminate high-frequency noise. The cut-off is set in
            the energy-loss scale, e.g. fsmooth = 1.5 (eV).
        iterations: {None, int}
            Number of the iterations for the internal loop to remove the
            relativistic contribution. If None, the loop runs until a chi-square
            target has been achieved (see below).
        chi2_target : float
            The average chi-square test score is measured in each iteration, and
            the reconstruction loop terminates when the target score is reached.
            See `_chi2_score` for more information.
        average : bool
            If True, use the average of the obtained dielectric functions over
            the navigation dimensions to calculate the relativistic correction.
            False by default, should only be used when analyzing spectra from a
            homogeneous sample, as only one dielectric function is retrieved.
            This switch has no effect if only one spectrum is being analyzed.
        full_output : bool
            If True, return a dictionary that contains the estimated
            thickness if `t` is None and the estimated relativistic correction
            if `iterations` > 1.

        Returns
        -------
        eps: DielectricFunction instance
            The complex dielectric function results,

                .. math::
                    \epsilon = \epsilon_1 + i*\epsilon_2,

            contained in a DielectricFunction instance.
        output: Dictionary (optional)
            A dictionary of optional outputs with the following keys:

            ``thickness``
                The estimated thickness in nm calculated by normalization of
                the corrected spectrum (only when `t` is None).

            ``relativistic correction``
               The estimated relativistic correction at the final iteration.

        Raises
        ------
        ValueError
            If both `n` and `t` are undefined (None).
        AttributeError
            If the beam_energy or the collection semi-angle are not defined in
            metadata.

        See also
        --------
        get_relativistic_spectrum, _check_adapt_map_input

        """
        # prepare data arrays
        if iterations == 1:
            # In this case s.data is not modified so there is no need to make
            # a deep copy.
            s = self.isig[0.:]
        else:
            s = self.isig[0.:].deepcopy()

        sorig = self.isig[0.:]

        # Avoid singularity at 0
        if s.axes_manager.signal_axes[0].axis[0] == 0:
            s = s.isig[1:]
            sorig = self.isig[1:]
        axis = s.axes_manager.signal_axes[0]
        eaxis = axis.axis.copy()

        # Constants and units, electron mass, beam energy and collection angle
        me = constants.value(
            'electron mass energy equivalent in MeV') * 1e3  # keV
        try:
            e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
        except BaseException:
            raise AttributeError("Please define the beam energy."
                                 "You can do this e.g. by using the "
                                 "set_microscope_parameters method")
        try:
            beta = s.metadata.Acquisition_instrument.TEM.Detector.\
                EELS.collection_angle
        except BaseException:
            raise AttributeError("Please define the collection semi-angle. "
                                 "You can do this e.g. by using the "
                                 "set_microscope_parameters method")

        # Mapped parameters, zlp, n and t
        if isinstance(zlp, hs.signals.Signal1D):
            if (zlp.axes_manager.signal_dimension
                    == 1) and (zlp.axes_manager.navigation_shape
                               == self.axes_manager.navigation_shape):
                zlp = zlp.integrate1D(axis.index_in_axes_manager)
        elif zlp is None and (full_output or iterations > 1):
            raise AttributeError("Please define the zlp parameter when "
                                 "full output or iterations > 1 are selected.")
        zlp = self._check_adapt_map_input(zlp)
        n = self._check_adapt_map_input(n)
        t = self._check_adapt_map_input(t)
        for name in ['zlp', 'n', 't']:
            parameter = eval(name)
            if isinstance(parameter, ValueError):
                parameter.args = (parameter.args[0] + name, )
                raise parameter

        # select refractive or thickness loop
        if n is None and t is None:
            raise ValueError('Thickness and refractive index undefined. '
                             'Please provide one of them.')
        elif n is not None and t is not None:
            raise ValueError('Thickness and refractive index both defined. '
                             'Please provide only one of them.')
        elif n is not None:
            refractive_loop = True
            if (zlp is not None) and (full_output is True or iterations > 1):
                t = self._get_navigation_signal().T
        elif t is not None:
            refractive_loop = False
            if zlp is None:
                raise ValueError('Zero-loss intensity is needed for thickness '
                                 'normalization. Also provide the zlp '
                                 'parameter.')

        # Slicer to get the signal data from 0 to axis.size
        slicer = s.axes_manager._get_data_slice([
            (axis.index_in_array, slice(None, axis.size)),
        ])

        # Kinetic definitions
        ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me)**2
        tgt = e0 * (2 * me + e0) / (me + e0)
        rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)

        # prepare the output dielectric function
        eps = s._deepcopy_with_new_data(np.zeros_like(s.data, np.complex128))
        eps.set_signal_type("DielectricFunction")
        eps.metadata.General.title = (self.metadata.General.title +
                                      ' KKA dielectric function')
        if eps.tmp_parameters.has_item('filename'):
            eps.tmp_parameters.filename = (
                self.tmp_parameters.filename +
                '_CDF_after_Kramers_Kronig_transform')

        from dielectric import ModifiedCDF
        eps = ModifiedCDF(eps)
        eps_corr = eps.deepcopy()

        # progressbar support
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        pbar = progressbar(total=iterations,
                           desc='1.00e+30',
                           disable=not show_progressbar)

        # initialize iteration control
        io = 0
        chi2 = chi2_target * 1e3
        while (io < iterations) and (chi2 > chi2_target):
            # Calculation of the ELF by normalization of the SSD
            Im = s.data / (np.log(1 + (beta * tgt / eaxis)**2)) / axis.scale

            if refractive_loop:
                # normalize using the refractive index.
                K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
                K = (K / (np.pi / 2) / (1 - 1. / n.data**2))
                # Calculate the thickness only if possible and required
                if full_output or iterations > 1:
                    te = (332.5 * K * ke / zlp.data)
                    t.data = te.squeeze()
            else:
                # normalize using the thickness
                K = t.data * zlp.data / (332.5 * ke)
            Im = Im / K[..., None] if len(self) != 1 else Im / K

            # Kramers-Kronig transform
            esize = 2 * axis.size
            q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize
            q[slicer] *= -1
            q = np.fft.fft(q, axis=axis.index_in_array)
            Re = q[slicer].real + 1
            epsabs = (Re**2 + Im**2)
            eps.data = Re / epsabs + 1j * Im / epsabs
            del Im, Re, q, epsabs

            if average and (eps.axes_manager.navigation_dimension > 0):
                eps_corr.data[:] = eps.data.mean(
                    eps.axes_manager.navigation_indices_in_array,
                    keepdims=True)
            else:
                eps_corr.data = eps.data.copy()

            if full_output or iterations > 1:
                # Relativistic correction
                #  Calculates relativistic correction from the Kroeger equation
                #  The difference with the relativistic DCS is subtracted
                scorr = eps_corr.get_relativistic_spectrum(zlp=zlp,
                                                           t=t,
                                                           output='diff',
                                                           *args,
                                                           **kwargs)
                # Limit the fractional correction
                if delta is not None:
                    fcorr = np.clip(scorr.data / sorig.data, -delta, delta)
                    scorr.data = fcorr * sorig.data

                # smooth
                if fsmooth is not None:
                    scorr.gaussian_filter(fsmooth)

                # Apply correction
                s.data = sorig.data - scorr.data
                s.data[s.data < 0.] = 0.

                if io > 0:
                    #chi2 = ((scorr.data-smemory)**2/smemory**2).sum()
                    chi2 = smemory._chi2_score(scorr)
                    chi2str = '{:0.2e}'.format(chi2)
                    pbar.set_description(chi2str)
                smemory = scorr.deepcopy()
            io += 1
            pbar.update(1)

        pbar.close()

        if full_output:
            output = {}
            tstr = self.metadata.General.title
            if refractive_loop:
                t.metadata.General.title = tstr + ', r-KKA thickness'
                output['thickness'] = t
            scorr.metadata.General.title = tstr + ', r-KKA correction'
            output['relativistic correction'] = scorr
            return eps, output
        else:
            return eps
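The Kramers-Kronig step buried in the loop (the block computing q and Re) is worth seeing in isolation. A one-dimensional toy sketch of the same FFT recipe, recovering the real part from a synthetic loss function; no physical units or normalization are intended:

import numpy as np

N = 256
e = (np.arange(N) + 1) * 0.1                   # energy axis, avoiding 0
im = np.exp(-0.5 * ((e - 15.0) / 2.0) ** 2)    # toy loss function Im(-1/eps)
esize = 2 * N                                  # zero-pad to twice the length
q = -2 * np.fft.fft(im, esize).imag / esize
q[:N] *= -1                                    # odd extension
q = np.fft.fft(q)
re = q[:N].real + 1                            # real part via Kramers-Kronig
print(re[:3])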
Example #15
    def quantification(self,
                       intensities,
                       method,
                       factors,
                       composition_units='atomic',
                       absorption_correction=False,
                       take_off_angle='auto',
                       thickness='auto',
                       convergence_criterion=0.5,
                       navigation_mask=1.0,
                       closing=True,
                       plot_result=False,
                       probe_area='auto',
                       max_iterations=30,
                       show_progressbar=None,
                       **kwargs):
        """
        Absorption corrected quantification using Cliff-Lorimer, the zeta-factor
        method, or ionization cross sections. The function iterates the
        quantification until two successive iterations change the final
        composition by less than a defined percentage criterion (0.5% by
        default).

        Parameters
        ----------
        intensities : list of signal
            The intensity for each X-ray line.
        method: {'CL', 'zeta', 'cross_section'}
            Set the quantification method: Cliff-Lorimer, zeta-factor, or
            ionization cross sections.
        factors : list of float
            The list of kfactors, zeta-factors or cross sections in the same
            order as intensities. Note that intensities provided by HyperSpy
            are sorted by the alphabetical order of the X-ray lines,
            e.g. factors = [0.982, 1.32, 1.60] for ['Al_Ka', 'Cr_Ka', 'Ni_Ka'].
        composition_units: {'atomic', 'weight'}
            The quantification returns the composition in 'atomic' percent by
            default, but can also return weight percent if specified.
        absorption_correction : bool
            Specify whether or not an absorption correction should be applied.
            False by default, so absorption will not be applied unless
            specified.
        take_off_angle : {'auto'}
            The angle between the sample surface and the vector along which
            X-rays travel to reach the centre of the detector.
        thickness : {'auto'}
            Thickness in nm (can be a single value or
            have the same navigation dimension as the signal).
            NB: must be specified for the 'CL' method. For the 'zeta' or
            'cross_section' methods, the first quantification step provides a
            mass_thickness internally during quantification.
        convergence_criterion : float
            The convergence criterion, defined as the percentage difference
            between two successive iterations. 0.5% by default.
        navigation_mask : None or float or signal
            The navigation locations marked as True are not used in the
            quantification. If a float is given, the vacuum_mask method is used
            to generate a mask with the float value as threshold.
            Else provide a signal with the navigation shape. Only for the
            'Cliff-Lorimer' method.
        closing : bool
            If True, apply a morphological closing to the mask obtained by
            vacuum_mask.
        plot_result : bool
            If True, plot the calculated composition. If the current
            object is a single spectrum it prints the result instead.
        probe_area : {'auto'}
            This allows the user to specify the probe_area for interaction with
            the sample needed specifically for the cross_section method of
            quantification. When left as 'auto' the pixel area is used,
            calculated from the navigation axes information.
        max_iterations : int
            An upper limit to the number of calculations for absorption correction.
        kwargs
            The extra keyword arguments are passed to plot.

        Returns
        -------
        A list of quantified elemental maps (signal) giving the composition of
        the sample in weight or atomic percent, with absorption correction
        taken into account based on the sample thickness estimate provided.

        If the method is 'zeta' this function also returns the mass thickness
        profile for the data.

        If the method is 'cross_section' this function also returns the atom
        counts for each element.

        Examples
        --------
        >>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
        >>> s.add_lines()
        >>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La
        >>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
        >>> s.plot(background_windows=bw)
        >>> intensities = s.get_lines_intensity(background_windows=bw)
        >>> res = s.quantification(intensities, kfactors, plot_result=True,
        ...                        composition_units='atomic')
        Fe (Fe_Ka): Composition = 15.41 atomic percent
        Pt (Pt_La): Composition = 84.59 atomic percent

        See also
        --------
        vacuum_mask
        """
        if isinstance(navigation_mask, float):
            if self.axes_manager.navigation_dimension > 0:
                navigation_mask = self.vacuum_mask(navigation_mask, closing)
            else:
                navigation_mask = None

        xray_lines = [
            intensity.metadata.Sample.xray_lines[0]
            for intensity in intensities
        ]
        it = 0
        if absorption_correction:
            if show_progressbar is None:  # pragma: no cover
                show_progressbar = preferences.General.show_progressbar
            if show_progressbar:
                pbar = progressbar(total=None,
                                   desc='Absorption correction calculation')

        composition = utils.stack(intensities,
                                  lazy=False,
                                  show_progressbar=False)

        if take_off_angle == 'auto':
            toa = self.get_take_off_angle()
        else:
            toa = take_off_angle

        # Determine the illumination area for cross-section quantification.
        if method == 'cross_section':
            if probe_area == 'auto':
                parameters = self.metadata.Acquisition_instrument.TEM
                if 'probe_area' in parameters:
                    probe_area = parameters.probe_area
                else:
                    probe_area = self.get_probe_area(
                        navigation_axes=self.axes_manager.navigation_axes)

        int_stack = utils.stack(intensities,
                                lazy=False,
                                show_progressbar=False)
        comp_old = np.zeros_like(int_stack.data)

        abs_corr_factor = None  # initial

        if method == 'CL':
            quantification_method = utils_eds.quantification_cliff_lorimer
            kwargs = {
                "intensities": int_stack.data,
                "kfactors": factors,
                "absorption_correction": abs_corr_factor,
                "mask": navigation_mask
            }

        elif method == 'zeta':
            quantification_method = utils_eds.quantification_zeta_factor
            kwargs = {
                "intensities": int_stack.data,
                "zfactors": factors,
                "dose": self._get_dose(method),
                "absorption_correction": abs_corr_factor
            }

        elif method == 'cross_section':
            quantification_method = utils_eds.quantification_cross_section
            kwargs = {
                "intensities": int_stack.data,
                "cross_sections": factors,
                "dose": self._get_dose(method, **kwargs),
                "absorption_correction": abs_corr_factor
            }

        else:
            raise ValueError('Please specify method for quantification, '
                             'as "CL", "zeta" or "cross_section".')

        while True:
            results = quantification_method(**kwargs)

            if method == 'CL':
                composition.data = results * 100.
                if absorption_correction:
                    if thickness is not None:
                        mass_thickness = intensities[0].deepcopy()
                        mass_thickness.data = self.CL_get_mass_thickness(
                            composition.split(), thickness)
                        mass_thickness.metadata.General.title = 'Mass thickness'
                    else:
                        raise ValueError(
                            'Thickness is required for absorption correction '
                            'with k-factor method. Results will contain no '
                            'correction for absorption.')

            elif method == 'zeta':
                composition.data = results[0] * 100
                mass_thickness = intensities[0].deepcopy()
                mass_thickness.data = results[1]

            else:
                composition.data = results[0] * 100.
                number_of_atoms = composition._deepcopy_with_new_data(
                    results[1])

            if method == 'cross_section':
                if absorption_correction:
                    abs_corr_factor = utils_eds.get_abs_corr_cross_section(
                        composition.split(), number_of_atoms.split(), toa,
                        probe_area)
                    kwargs["absorption_correction"] = abs_corr_factor
            else:
                if absorption_correction:
                    abs_corr_factor = utils_eds.get_abs_corr_zeta(
                        composition.split(), mass_thickness, toa)
                    kwargs["absorption_correction"] = abs_corr_factor

            res_max = np.max(composition.data - comp_old)
            comp_old = composition.data

            if absorption_correction and show_progressbar:
                pbar.update(1)
            it += 1
            if not absorption_correction or abs(
                    res_max) < convergence_criterion:
                break
            elif it >= max_iterations:
                raise Exception('Absorption correction failed as solution '
                                f'did not converge after {max_iterations} '
                                'iterations')

        if method == 'cross_section':
            number_of_atoms = composition._deepcopy_with_new_data(results[1])
            number_of_atoms = number_of_atoms.split()
            composition = composition.split()
        else:
            composition = composition.split()

        #convert output units to selection as required.
        if composition_units == 'atomic':
            if method != 'cross_section':
                composition = utils.material.weight_to_atomic(composition)
        else:
            if method == 'cross_section':
                composition = utils.material.atomic_to_weight(composition)

        #Label each of the elemental maps in the image stacks for composition.
        for i, xray_line in enumerate(xray_lines):
            element, line = utils_eds._get_element_and_line(xray_line)
            composition[i].metadata.General.title = composition_units + \
                ' percent of ' + element
            composition[i].metadata.set_item("Sample.elements", ([element]))
            composition[i].metadata.set_item("Sample.xray_lines",
                                             ([xray_line]))
            if plot_result and composition[i].axes_manager.navigation_size == 1:
                c = float(composition[i].data)
                print(
                    f"{element} ({xray_line}): Composition = {c:.2f} percent")
        #For the cross section method this is repeated for the number of atom maps
        if method == 'cross_section':
            for i, xray_line in enumerate(xray_lines):
                element, line = utils_eds._get_element_and_line(xray_line)
                number_of_atoms[i].metadata.General.title = \
                    'atom counts of ' + element
                number_of_atoms[i].metadata.set_item("Sample.elements",
                                                     ([element]))
                number_of_atoms[i].metadata.set_item("Sample.xray_lines",
                                                     ([xray_line]))
        if plot_result and composition[i].axes_manager.navigation_size != 1:
            utils.plot.plot_signals(composition, **kwargs)

        if absorption_correction:
            _logger.info(f'Convergence found after {it} iterations.')

        if method == 'zeta':
            mass_thickness.metadata.General.title = 'Mass thickness'
            self.metadata.set_item("Sample.mass_thickness", mass_thickness)
            return composition, mass_thickness
        elif method == 'cross_section':
            return composition, number_of_atoms
        elif method == 'CL':
            if absorption_correction:
                mass_thickness.metadata.General.title = 'Mass thickness'
                return composition, mass_thickness
            else:
                return composition
        else:
            raise ValueError('Please specify method for quantification, as '
                             '"CL", "zeta" or "cross_section"')
Example #16
    def estimate_shift2D(self,
                         reference='current',
                         correlation_threshold=None,
                         chunk_size=30,
                         roi=None,
                         normalize_corr=False,
                         sobel=True,
                         medfilter=True,
                         hanning=True,
                         plot=False,
                         dtype='float',
                         show_progressbar=None):
        """Estimate the shifts in a image using phase correlation
        This method can only estimate the shift by comparing
        bidimensional features that should not change position
        between frames. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient
        to select a region of interest by setting the roi keyword.
        Parameters
        ----------
        reference : {'current', 'cascade' ,'stat'}
            If 'current' (default) the image at the current
            coordinates is taken as reference. If 'cascade' each image
            is aligned with the previous one. If 'stat' the translation
            of every image with all the rest is estimated and by
            performing statistical analysis on the result the
            translation is estimated.
        correlation_threshold : {None, 'auto', float}
            This parameter is only relevant when `reference` is 'stat'.
            If float, the shift estimations with a maximum correlation
            value lower than the given value are not used to compute
            the estimated shifts. If 'auto' the threshold is calculated
            automatically as the minimum maximum correlation value
            of the automatically selected reference image.
        chunk_size : {None, int}
            If int and `reference`=='stat', the number of images used
            as reference is limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
             Define the region of interest. If int (float), the position
             is given as an axis index (value).
        sobel : bool
            apply a sobel filter for edge enhancement
        medfilter :  bool
            apply a median filter for noise reduction
        hanning : bool
            Apply a 2d hanning filter
        plot : bool
            If True plots the images after applying the filters and
            the phase correlation
        dtype : str or dtype
            Typecode or data-type in which the calculations must be
            performed.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        Returns
        -------
        list of applied shifts

        Notes
        -----
        The statistical analysis approach to the translation estimation
        when using `reference`='stat' roughly follows [1]_. If you use
        it please cite their article.

        References
        ----------
        .. [1] Schaffer, Bernhard, Werner Grogger, and Gerald
           Kothleitner. “Automated Spatial Drift Correction for EFTEM
           Signal2D Series.” Ultramicroscopy 102, no. 1 (December 2004):
           27–36.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_two()
        if roi is not None:
            # Get the indices of the roi
            yaxis = self.axes_manager.signal_axes[1]
            xaxis = self.axes_manager.signal_axes[0]
            roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
                        [yaxis._get_index(i) for i in roi[:2]])

        ref = None if reference == 'cascade' else \
            self.__call__().copy()
        shifts = []
        nrows = None
        images_number = self.axes_manager._max_index + 1
        if reference == 'stat':
            nrows = images_number if chunk_size is None else \
                min(images_number, chunk_size)
            pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1,
                                ),
                               dtype=np.dtype([('max_value', float),
                                               ('shift', np.int32,
                                                (2,))]))
            nshift, max_value = estimate_image_shift(
                self(),
                self(),
                roi=roi,
                sobel=sobel,
                medfilter=medfilter,
                hanning=hanning,
                normalize_corr=normalize_corr,
                plot=plot,
                dtype=dtype)
            np.fill_diagonal(pcarray['max_value'], max_value)
            pbar_max = nrows * images_number
        else:
            pbar_max = images_number

        # Main iteration loop. Fills the rows of pcarray when reference
        # is stat
        with progressbar(total=pbar_max,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i1, im in enumerate(self._iterate_signal()):
                if reference in ['current', 'cascade']:
                    if ref is None:
                        ref = im.copy()
                        shift = np.array([0, 0])
                    nshift, max_val = estimate_image_shift(
                        ref, im, roi=roi, sobel=sobel, medfilter=medfilter,
                        hanning=hanning, plot=plot,
                        normalize_corr=normalize_corr, dtype=dtype)
                    if reference == 'cascade':
                        shift += nshift
                        ref = im.copy()
                    else:
                        shift = nshift
                    shifts.append(shift.copy())
                    pbar.update(1)
                elif reference == 'stat':
                    if i1 == nrows:
                        break
                    # Iterate to fill the columns of pcarray
                    for i2, im2 in enumerate(
                            self._iterate_signal()):
                        if i2 > i1:
                            nshift, max_value = estimate_image_shift(
                                im,
                                im2,
                                roi=roi,
                                sobel=sobel,
                                medfilter=medfilter,
                                hanning=hanning,
                                normalize_corr=normalize_corr,
                                plot=plot,
                                dtype=dtype)

                            pcarray[i1, i2] = max_value, nshift
                        del im2
                        pbar.update(1)
                    del im
        if reference == 'stat':
            # Select the reference image as the one that has the
            # highest max_value in its row
            sqpcarr = pcarray[:, :nrows]
            sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
            sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
            ref_index = np.argmax(pcarray['max_value'].min(1))
            self.ref_index = ref_index
            shifts = (pcarray['shift'] +
                      pcarray['shift'][ref_index, :nrows][:, np.newaxis])
            if correlation_threshold is not None:
                if correlation_threshold == 'auto':
                    correlation_threshold = \
                        (pcarray['max_value'].min(0)).max()
                    _logger.info("Correlation threshold = %1.2f",
                                 correlation_threshold)
                shifts[pcarray['max_value'] <
                       correlation_threshold] = ma.masked
                shifts.mask[ref_index, :] = False

            shifts = shifts.mean(0)
        else:
            shifts = np.array(shifts)
            del ref
        return shifts
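A minimal usage sketch for the method above (the signal construction and the
`align2D` call are illustrative assumptions, not part of the example):

    import numpy as np
    import hyperspy.api as hs

    s = hs.signals.Signal2D(np.random.random((10, 64, 64)))
    shifts = s.estimate_shift2D(reference='stat', correlation_threshold='auto')
    # Each row of `shifts` holds the estimated (y, x) translation of a frame;
    # feeding them back to `align2D` applies the correction to the stack.
    s.align2D(shifts=shifts)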
Example #20
0
    def test_progressbar_shown(self):
        pbar = progressbar.progressbar(maxval=2, disabled=False)
        for i in range(2):
            pbar.update(i)
        pbar.finish()
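For context, the methods in this document use `progressbar` as a tqdm-style
context manager. A minimal sketch of that usage pattern (assuming the wrapper
simply forwards to tqdm):

    from tqdm import tqdm as progressbar

    with progressbar(total=2, disable=False, leave=True) as pbar:
        for i in range(2):
            pbar.update(1)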
Example #21
0
    def estimate_shift1D(self,
                         start=None,
                         end=None,
                         reference_indices=None,
                         max_shift=None,
                         interpolate=True,
                         number_of_interpolation_points=5,
                         mask=None,
                         show_progressbar=None):
        """Estimate the shifts in the current signal axis using
         cross-correlation.
        This method can only estimate the shift by comparing
        unidimensional features that should not change the position in
        the signal axis. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient to
        select the feature of interest providing sensible values for
        `start` and `end`. By default interpolation is used to obtain
        subpixel precision.
        Parameters
        ----------
        start, end : {int | float | None}
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        reference_indices : tuple of ints or None
            Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
            coordinates is used for this purpose.
        max_shift : int
            "Saturation limit" for the shift.
        interpolate : bool
            If True, interpolation is used to provide sub-pixel
            accuracy.
        number_of_interpolation_points : int
            Number of interpolation points. Warning: making this number
            too big can saturate the memory
        mask : BaseSignal of bool data type.
            It must have signal_dimension = 0 and navigation_shape equal to the
            current signal. Where mask is True the shift is not computed
            and is set to nan.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        Returns
        -------
        An array with the result of the estimation in the axis units.
        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        ip = number_of_interpolation_points + 1
        axis = self.axes_manager.signal_axes[0]
        self._check_navigation_mask(mask)
        if reference_indices is None:
            reference_indices = self.axes_manager.indices

        i1, i2 = axis._get_index(start), axis._get_index(end)
        shift_array = np.zeros(self.axes_manager._navigation_shape_in_array,
                               dtype=float)
        ref = self.inav[reference_indices].data[i1:i2]
        if interpolate is True:
            ref = interpolate1D(ip, ref)
        with progressbar(total=self.axes_manager.navigation_size,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i, (dat, indices) in enumerate(
                    zip(self._iterate_signal(),
                        self.axes_manager._array_indices_generator())):
                if mask is not None and bool(mask.data[indices]) is True:
                    shift_array[indices] = np.nan
                else:
                    dat = dat[i1:i2]
                    if interpolate is True:
                        dat = interpolate1D(ip, dat)
                    shift_array[indices] = np.argmax(
                        np.correlate(ref, dat, 'full')) - len(ref) + 1
                pbar.update(1)

        if max_shift is not None:
            if interpolate is True:
                max_shift *= ip
            shift_array.clip(-max_shift, max_shift, out=shift_array)
        if interpolate is True:
            shift_array /= ip
        shift_array *= axis.scale
        return shift_array
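The core of `estimate_shift1D` is a full cross-correlation: the lag of the
correlation maximum gives the shift in index units, which is then converted
to axis units. A standalone sketch on synthetic data (all names illustrative):

    import numpy as np

    x = np.linspace(-10, 10, 201)
    ref = np.exp(-x ** 2)                 # reference peak centred at 0
    dat = np.exp(-(x - 1.0) ** 2)         # same peak displaced by +1.0
    lag = np.argmax(np.correlate(ref, dat, 'full')) - len(ref) + 1
    shift = lag * (x[1] - x[0])           # index shift -> axis units
    print(shift)                          # ~ -1.0: the correction to apply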
Example #22
0
    def estimate_peak_width(self,
                            factor=0.5,
                            window=None,
                            return_interval=False,
                            show_progressbar=None):
        """Estimate the width of the highest intensity of peak
        of the spectra at a given fraction of its maximum.

        It can be used with asymmetric peaks. For accurate results any
        background must be previously subtracted.
        The estimation is performed by interpolation using cubic splines.

        Parameters
        ----------
        factor : 0 < float < 1
            The default, 0.5, estimates the FWHM.
        window : None, float
            The size of the window centred at the peak maximum
            used to perform the estimation.
            The window size must be chosen with care: if it is narrower
            than the width of the peak at some positions or if it is
            so wide that it includes other more intense peaks this
            method cannot compute the width and a NaN is stored instead.
        return_interval : bool
            If True, returns 2 extra signals with the positions of the
            desired height fraction at the left and right of the
            peak.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.

        Returns
        -------
        width or [width, left, right], depending on the value of
        `return_interval`.

        """

        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        if not 0 < factor < 1:
            raise ValueError("factor must be between 0 and 1.")

        left, right = (self._get_navigation_signal(),
                       self._get_navigation_signal())
        # The signals must be of dtype float to contain np.nan
        left.change_dtype('float')
        right.change_dtype('float')
        axis = self.axes_manager.signal_axes[0]
        x = axis.axis
        maxval = self.axes_manager.navigation_size
        show_progressbar = show_progressbar and maxval > 0
        for i, spectrum in progressbar(enumerate(self),
                                       total=maxval,
                                       disable=not show_progressbar,
                                       leave=True):
            if window is not None:
                vmax = axis.index2value(spectrum.data.argmax())
                spectrum = spectrum.isig[vmax - window / 2.:vmax + window / 2.]
                x = spectrum.axes_manager[0].axis
            spline = scipy.interpolate.UnivariateSpline(
                x, spectrum.data - factor * spectrum.data.max(), s=0)
            roots = spline.roots()
            if len(roots) == 2:
                left.isig[self.axes_manager.indices] = roots[0]
                right.isig[self.axes_manager.indices] = roots[1]
            else:
                left.isig[self.axes_manager.indices] = np.nan
                right.isig[self.axes_manager.indices] = np.nan
        width = right - left
        if factor == 0.5:
            width.metadata.General.title = (self.metadata.General.title +
                                            " FWHM")
            left.metadata.General.title = (self.metadata.General.title +
                                           " FWHM left position")

            right.metadata.General.title = (self.metadata.General.title +
                                            " FWHM right position")
        else:
            width.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum" % factor)

            left.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum left position" % factor)
            right.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum right position" % factor)
        if return_interval is True:
            return [width, left, right]
        else:
            return width
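The width estimation above reduces to finding the roots of an interpolating
spline through `data - factor * data.max()`. A standalone sketch for a
Gaussian, whose FWHM is known analytically (2 * sqrt(2 * ln 2) * sigma):

    import numpy as np
    import scipy.interpolate

    sigma = 2.0
    x = np.linspace(-10, 10, 401)
    y = np.exp(-x ** 2 / (2 * sigma ** 2))
    spline = scipy.interpolate.UnivariateSpline(x, y - 0.5 * y.max(), s=0)
    left_root, right_root = spline.roots()  # the two half-maximum crossings
    print(right_root - left_root)           # ~ 2.355 * sigma ~ 4.71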
Example #23
0
    def shift1D(self,
                shift_array,
                interpolation_method='linear',
                crop=True,
                expand=False,
                fill_value=np.nan,
                show_progressbar=None):
        """Shift the data in place over the signal axis by the amount specified
        by an array.
        Parameters
        ----------
        shift_array : numpy array
            An array containing the shifting amount. It must have
            `axes_manager._navigation_shape_in_array` shape.
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        crop : bool
            If True automatically crop the signal axis at both ends if
            needed.
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if not np.any(shift_array):
            # Nothing to do, the shift array is filled with zeros
            return
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]

        # Figure out min/max shifts, and translate to shifts in index as well
        minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
        if minimum < 0:
            ihigh = 1 + axis.value2index(axis.high_value + minimum,
                                         rounding=math.floor)
        else:
            ihigh = axis.high_index + 1
        if maximum > 0:
            ilow = axis.value2index(axis.offset + maximum, rounding=math.ceil)
        else:
            ilow = axis.low_index
        if expand:
            padding = []
            for i in range(self.data.ndim):
                if i == axis.index_in_array:
                    padding.append(
                        (axis.high_index - ihigh + 1, ilow - axis.low_index))
                else:
                    padding.append((0, 0))
            self.data = np.pad(self.data,
                               padding,
                               mode='constant',
                               constant_values=(fill_value, ))
            axis.offset += minimum
            axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
        offset = axis.offset
        original_axis = axis.axis.copy()
        with progressbar(total=self.axes_manager.navigation_size,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i, (dat, shift) in enumerate(
                    zip(self._iterate_signal(), shift_array.ravel())):
                if np.isnan(shift):
                    continue
                si = sp.interpolate.interp1d(original_axis,
                                             dat,
                                             bounds_error=False,
                                             fill_value=fill_value,
                                             kind=interpolation_method)
                axis.offset = float(offset - shift)
                dat[:] = si(axis.axis)
                pbar.update(1)

        axis.offset = offset

        if crop and not expand:
            self.crop(axis.index_in_axes_manager, ilow, ihigh)

        self.events.data_changed.trigger(obj=self)
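A minimal sketch combining `estimate_shift1D` and `shift1D` to align a stack
of spectra, mirroring what HyperSpy's `align1D` does internally (the signal
and data here are illustrative assumptions):

    import numpy as np
    import hyperspy.api as hs

    s = hs.signals.Signal1D(np.random.random((5, 100)))
    shift_array = s.estimate_shift1D()
    # estimate_shift1D already returns the correction in the sign convention
    # that shift1D expects, so the array is passed through unchanged.
    s.shift1D(shift_array)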
Example #24
0
    def estimate_shift2D(self,
                         reference='current',
                         correlation_threshold=None,
                         chunk_size=30,
                         roi=None,
                         normalize_corr=False,
                         sobel=True,
                         medfilter=True,
                         hanning=True,
                         plot=False,
                         dtype='float',
                         show_progressbar=None,
                         sub_pixel_factor=1):
        """Estimate the shifts in a image using phase correlation

        This method can only estimate the shift by comparing
        bidimensional features that should not change position
        between frames. To decrease the memory usage and the time of
        computation, and to increase the accuracy of the results, it is
        convenient to select a region of interest by setting the roi
        keyword.

        Parameters
        ----------
        reference : {'current', 'cascade', 'stat'}
            If 'current' (default) the image at the current
            coordinates is taken as reference. If 'cascade' each image
            is aligned with the previous one. If 'stat' the translation
            of every image with all the rest is estimated and by
            performing statistical analysis on the result the
            translation is estimated.
        correlation_threshold : {None, 'auto', float}
            This parameter is only relevant when reference='stat'.
            If float, the shift estimations with a maximum correlation
            value lower than the given value are not used to compute
            the estimated shifts. If 'auto' the threshold is calculated
            automatically as the minimum maximum correlation value
            of the automatically selected reference image.
        chunk_size : {None, int}
            If int and reference='stat' the number of images used
            as reference is limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int (float), the position
            is given as axis index (value). Note that ROIs can be used
            in place of a tuple.
        sobel : bool
            Apply a Sobel filter for edge enhancement.
        medfilter : bool
            Apply a median filter for noise reduction.
        hanning : bool
            Apply a 2D Hanning filter.
        plot : bool or 'reuse'
            If True plots the images after applying the filters and
            the phase correlation. If 'reuse', it will also plot the images,
            but it will only use one figure, and continuously update the images
            in that figure as it progresses through the stack.
        dtype : str or dtype
            Typecode or data-type in which the calculations must be
            performed.
        %s
        sub_pixel_factor : float
            Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
            parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.

        Returns
        -------
        shifts : list of array
            List of estimated shifts

        Notes
        -----
        The statistical analysis approach to the translation estimation
        when using reference='stat' roughly follows [*]_. If you use
        it please cite their article.

        References
        ----------
        .. [*] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
           “Automated Spatial Drift Correction for EFTEM Image Series.”
           Ultramicroscopy 102, no. 1 (December 2004): 27–36.

        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_two()
        if roi is not None:
            # Get the indices of the roi
            yaxis = self.axes_manager.signal_axes[1]
            xaxis = self.axes_manager.signal_axes[0]
            roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
                        [yaxis._get_index(i) for i in roi[:2]])

        ref = None if reference == 'cascade' else \
            self.__call__().copy()
        shifts = []
        nrows = None
        images_number = self.axes_manager._max_index + 1
        if plot == 'reuse':
            # Reuse figure for plots
            plot = plt.figure()
        if reference == 'stat':
            nrows = images_number if chunk_size is None else \
                min(images_number, chunk_size)
            pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1),
                               dtype=np.dtype([('max_value', float),
                                               ('shift', np.int32, (2, ))]))
            nshift, max_value = estimate_image_shift(
                self(),
                self(),
                roi=roi,
                sobel=sobel,
                medfilter=medfilter,
                hanning=hanning,
                normalize_corr=normalize_corr,
                plot=plot,
                dtype=dtype,
                sub_pixel_factor=sub_pixel_factor)
            np.fill_diagonal(pcarray['max_value'], max_value)
            pbar_max = nrows * images_number
        else:
            pbar_max = images_number

        # Main iteration loop. Fills the rows of pcarray when reference
        # is stat
        with progressbar(total=pbar_max,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i1, im in enumerate(self._iterate_signal()):
                if reference in ['current', 'cascade']:
                    if ref is None:
                        ref = im.copy()
                        shift = np.array([0, 0])
                    nshift, max_val = estimate_image_shift(
                        ref,
                        im,
                        roi=roi,
                        sobel=sobel,
                        medfilter=medfilter,
                        hanning=hanning,
                        plot=plot,
                        normalize_corr=normalize_corr,
                        dtype=dtype,
                        sub_pixel_factor=sub_pixel_factor)
                    if reference == 'cascade':
                        shift += nshift
                        ref = im.copy()
                    else:
                        shift = nshift
                    shifts.append(shift.copy())
                    pbar.update(1)
                elif reference == 'stat':
                    if i1 == nrows:
                        break
                    # Iterate to fill the columns of pcarray
                    for i2, im2 in enumerate(self._iterate_signal()):
                        if i2 > i1:
                            nshift, max_value = estimate_image_shift(
                                im,
                                im2,
                                roi=roi,
                                sobel=sobel,
                                medfilter=medfilter,
                                hanning=hanning,
                                normalize_corr=normalize_corr,
                                plot=plot,
                                dtype=dtype,
                                sub_pixel_factor=sub_pixel_factor)
                            pcarray[i1, i2] = max_value, nshift
                        del im2
                        pbar.update(1)
                    del im
        if reference == 'stat':
            # Select the reference image as the one that has the
            # highest max_value in its row
            sqpcarr = pcarray[:, :nrows]
            sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
            sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
            ref_index = np.argmax(pcarray['max_value'].min(1))
            self.ref_index = ref_index
            shifts = (pcarray['shift'] +
                      pcarray['shift'][ref_index, :nrows][:, np.newaxis])
            if correlation_threshold is not None:
                if correlation_threshold == 'auto':
                    correlation_threshold = \
                        (pcarray['max_value'].min(0)).max()
                    _logger.info("Correlation threshold = %1.2f",
                                 correlation_threshold)
                shifts[
                    pcarray['max_value'] < correlation_threshold] = ma.masked
                shifts.mask[ref_index, :] = False

            shifts = shifts.mean(0)
        else:
            shifts = np.array(shifts)
            del ref
        return shifts
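In the 'stat' branch above only the diagonal and upper triangle of `pcarray`
are filled, so `symmetrize`/`antisymmetrize` must complete the matrix before
the row-wise statistics. A plausible sketch of such helpers for a 2-D array
with that fill pattern (the actual implementations may differ):

    import numpy as np

    def symmetrize(a):
        # Correlation is order-independent: mirror the upper triangle down.
        return a + a.T - np.diag(np.diag(a))

    def antisymmetrize(a):
        # A shift reverses sign when the image order is swapped; with the
        # lower triangle still zero, subtracting the transpose fills it.
        return a - a.T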
Example #25
0
    def decomposition(self,
                      normalize_poissonian_noise=False,
                      algorithm="SVD",
                      output_dimension=None,
                      signal_mask=None,
                      navigation_mask=None,
                      get=threaded.get,
                      num_chunks=None,
                      reproject=True,
                      print_info=True,
                      **kwargs):
        """Perform Incremental (Batch) decomposition on the data.

        The results are stored in ``self.learning_results``.

        Read more in the :ref:`User Guide <big_data.decomposition>`.

        Parameters
        ----------
        normalize_poissonian_noise : bool, default False
            If True, scale the signal to normalize Poissonian noise using
            the approach described in [KeenanKotula2004]_.
        algorithm : {'SVD', 'PCA', 'ORPCA', 'ORNMF'}, default 'SVD'
            The decomposition algorithm to use.
        output_dimension : int or None, default None
            Number of components to keep/calculate. If None, keep all
            (only valid for 'SVD' algorithm)
        get : dask scheduler
            the dask scheduler to use for computations;
            default `dask.threaded.get`
        num_chunks : int or None, default None
            the number of dask chunks to pass to the decomposition model.
            More chunks require more memory, but should run faster. Will be
            increased to contain at least ``output_dimension`` signals.
        navigation_mask : {BaseSignal, numpy array, dask array}
            The navigation locations marked as True are not used in the
            decomposition.
        signal_mask : {BaseSignal, numpy array, dask array}
            The signal locations marked as True are not used in the
            decomposition.
        reproject : bool, default True
            Reproject data on the learnt components (factors) after learning.
        print_info : bool, default True
            If True, print information about the decomposition being performed.
            In the case of sklearn.decomposition objects, this includes the
            values of all arguments of the chosen sklearn algorithm.
        **kwargs
            passed to the partial_fit/fit functions.

        References
        ----------
        .. [KeenanKotula2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
            in the multivariate analysis of ToF-SIMS spectrum images", Surf.
            Interface Anal 36(3) (2004): 203-212.

        See Also
        --------
        * :py:meth:`~.learn.mva.MVA.decomposition` for non-lazy signals
        * :py:func:`dask.array.linalg.svd`
        * :py:class:`sklearn.decomposition.IncrementalPCA`
        * :py:class:`~.learn.rpca.ORPCA`
        * :py:class:`~.learn.ornmf.ORNMF`

        """
        if kwargs.get("bounds", False):
            warnings.warn(
                "The `bounds` keyword is deprecated and will be removed "
                "in v2.0. Since version > 1.3 this has no effect.",
                VisibleDeprecationWarning,
            )
            kwargs.pop("bounds", None)

        # Deprecate 'ONMF' for 'ORNMF'
        if algorithm == "ONMF":
            warnings.warn(
                "The argument `algorithm='ONMF'` has been deprecated and will "
                "be removed in future. Please use `algorithm='ORNMF'` instead.",
                VisibleDeprecationWarning,
            )
            algorithm = "ORNMF"

        # Check algorithms requiring output_dimension
        algorithms_require_dimension = ["PCA", "ORPCA", "ORNMF"]
        if algorithm in algorithms_require_dimension and output_dimension is None:
            raise ValueError(
                "`output_dimension` must be specified for '{}'".format(
                    algorithm))

        explained_variance = None
        explained_variance_ratio = None

        _al_data = self._data_aligned_with_axes
        nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
        sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]

        num_chunks = 1 if num_chunks is None else num_chunks
        blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
        nblocks = multiply([len(c) for c in nav_chunks])

        if output_dimension and blocksize / output_dimension < num_chunks:
            num_chunks = int(np.ceil(blocksize / output_dimension))

        blocksize *= num_chunks

        # Initialize return_info and print_info
        to_return = None
        to_print = [
            "Decomposition info:",
            "  normalize_poissonian_noise={}".format(normalize_poissonian_noise),
            "  algorithm={}".format(algorithm),
            "  output_dimension={}".format(output_dimension)
        ]

        # LEARN
        if algorithm == "PCA":
            if not import_sklearn.sklearn_installed:
                raise ImportError("algorithm='PCA' requires scikit-learn")

            obj = import_sklearn.sklearn.decomposition.IncrementalPCA(
                n_components=output_dimension)
            method = partial(obj.partial_fit, **kwargs)
            reproject = True
            to_print.extend(["scikit-learn estimator:", obj])

        elif algorithm == "ORPCA":
            from hyperspy.learn.rpca import ORPCA

            batch_size = kwargs.pop("batch_size", None)
            obj = ORPCA(output_dimension, **kwargs)
            method = partial(obj.fit, batch_size=batch_size)

        elif algorithm == "ORNMF":
            from hyperspy.learn.ornmf import ORNMF

            batch_size = kwargs.pop("batch_size", None)
            obj = ORNMF(output_dimension, **kwargs)
            method = partial(obj.fit, batch_size=batch_size)

        elif algorithm != "SVD":
            raise ValueError("'algorithm' not recognised")

        original_data = self.data
        try:
            _logger.info("Performing decomposition analysis")

            if normalize_poissonian_noise:
                _logger.info("Scaling the data to normalize Poissonian noise")

                data = self._data_aligned_with_axes
                ndim = self.axes_manager.navigation_dimension
                sdim = self.axes_manager.signal_dimension
                nm = da.logical_not(
                    da.zeros(self.axes_manager.navigation_shape[::-1],
                             chunks=nav_chunks) if navigation_mask is None else
                    to_array(navigation_mask, chunks=nav_chunks))
                sm = da.logical_not(
                    da.zeros(self.axes_manager.signal_shape[::-1],
                             chunks=sig_chunks) if signal_mask is None else
                    to_array(signal_mask, chunks=sig_chunks))
                bH, aG = da.compute(
                    data.sum(axis=tuple(range(ndim))),
                    data.sum(axis=tuple(range(ndim, ndim + sdim))),
                )
                bH = da.where(sm, bH, 1)
                aG = da.where(nm, aG, 1)

                raG = da.sqrt(aG)
                rbH = da.sqrt(bH)

                coeff = (raG[(...,) + (None,) * rbH.ndim] *
                         rbH[(None,) * raG.ndim + (...,)])
                coeff = coeff.map_blocks(np.nan_to_num)
                coeff = da.where(coeff == 0, 1, coeff)
                data = data / coeff
                self.data = data

            # LEARN
            if algorithm == "SVD":
                reproject = False
                from dask.array.linalg import svd

                try:
                    self._unfolded4decomposition = self.unfold()
                    # TODO: implement masking
                    if navigation_mask or signal_mask:
                        raise NotImplementedError(
                            "Masking is not yet implemented for lazy SVD")

                    U, S, V = svd(self.data)

                    if output_dimension is None:
                        min_shape = min(min(U.shape), min(V.shape))
                    else:
                        min_shape = output_dimension

                    U = U[:, :min_shape]
                    S = S[:min_shape]
                    V = V[:min_shape]

                    factors = V.T
                    explained_variance = S**2 / self.data.shape[0]
                    loadings = U * S
                finally:
                    if self._unfolded4decomposition is True:
                        self.fold()
                        self._unfolded4decomposition = False
            else:
                this_data = []
                try:
                    for chunk in progressbar(
                            self._block_iterator(
                                flat_signal=True,
                                get=get,
                                signal_mask=signal_mask,
                                navigation_mask=navigation_mask,
                            ),
                            total=nblocks,
                            leave=True,
                            desc="Learn",
                    ):
                        this_data.append(chunk)
                        if len(this_data) == num_chunks:
                            thedata = np.concatenate(this_data, axis=0)
                            method(thedata)
                            this_data = []
                    if len(this_data):
                        thedata = np.concatenate(this_data, axis=0)
                        method(thedata)
                except KeyboardInterrupt:
                    pass

            # GET ALREADY CALCULATED RESULTS
            if algorithm == "PCA":
                explained_variance = obj.explained_variance_
                explained_variance_ratio = obj.explained_variance_ratio_
                factors = obj.components_.T

            elif algorithm == "ORPCA":
                factors, loadings = obj.finish()
                loadings = loadings.T

            elif algorithm == "ORNMF":
                factors, loadings = obj.finish()
                loadings = loadings.T

            # REPROJECT
            if reproject:
                if algorithm == "PCA":
                    method = obj.transform

                    def post(a):
                        return np.concatenate(a, axis=0)

                elif algorithm == "ORPCA":
                    method = obj.project

                    def post(a):
                        return np.concatenate(a, axis=1).T

                elif algorithm == "ORNMF":
                    method = obj.project

                    def post(a):
                        return np.concatenate(a, axis=1).T

                _map = map(
                    method,
                    self._block_iterator(
                        flat_signal=True,
                        get=get,
                        signal_mask=signal_mask,
                        navigation_mask=navigation_mask,
                    ),
                )
                H = []
                try:
                    for thing in progressbar(_map,
                                             total=nblocks,
                                             desc="Project"):
                        H.append(thing)
                except KeyboardInterrupt:
                    pass
                loadings = post(H)

            if explained_variance is not None and explained_variance_ratio is None:
                explained_variance_ratio = (explained_variance /
                                            explained_variance.sum())

            # RESHUFFLE "blocked" LOADINGS
            ndim = self.axes_manager.navigation_dimension
            if algorithm != "SVD":  # Only needed for online algorithms
                try:
                    loadings = _reshuffle_mixed_blocks(
                        loadings, ndim, (output_dimension,),
                        nav_chunks).reshape((-1, output_dimension))
                except ValueError:
                    # In case the projection step was not finished, it's left
                    # as scrambled
                    pass
        finally:
            self.data = original_data

        target = self.learning_results
        target.decomposition_algorithm = algorithm
        target.output_dimension = output_dimension
        if algorithm != "SVD":
            target._object = obj
        target.factors = factors
        target.loadings = loadings
        target.explained_variance = explained_variance
        target.explained_variance_ratio = explained_variance_ratio

        # Rescale the results if the noise was normalized
        if normalize_poissonian_noise is True:
            target.factors = target.factors * rbH.ravel()[:, np.newaxis]
            target.loadings = target.loadings * raG.ravel()[:, np.newaxis]

        # Print details about the decomposition we just performed
        if print_info:
            print("\n".join([str(pr) for pr in to_print]))
Example #26
0
def get_feature_separation(
    signal,
    separation_range=(5, 30),
    separation_step=1,
    pca=False,
    subtract_background=False,
    normalize_intensity=False,
    threshold_rel=0.02,
    show_progressbar=True,
):
    """
    Plot the peak positions in a HyperSpy signal as a function of
    peak separation.

    Parameters
    ----------
    signal : HyperSpy signal 2D
    separation_range : tuple, optional, default (5, 30)
    separation_step : int, optional, default 1
    pca : bool, default False
    subtract_background : bool, default False
    normalize_intensity : bool, default False
    threshold_rel : float, default 0.02
    show_progressbar : bool, default True

    Example
    -------
    >>> import numpy as np
    >>> import hyperspy.api as hs
    >>> from atomap.atom_finding_refining import get_feature_separation
    >>> s = hs.signals.Signal2D(np.random.random((500, 500)))
    >>> s1 = get_feature_separation(s)

    """
    if separation_range[0] > separation_range[1]:
        raise ValueError(
            "The lower range of the separation_range ({0}) can not be "
            "larger than the upper range ({1})".format(
                separation_range[0], separation_range[1]))
    if separation_range[0] < 1:
        raise ValueError(
            "The lower range of the separation_range can not be below 1. "
            "Current value: {0}".format(separation_range[0]))

    if signal.data.dtype == np.dtype('float16'):
        raise ValueError("signal has dtype float16, which is not supported. "
                         "Use signal.change_dtype('float32') to change it.")

    separation_list, peak_list = find_features_by_separation(
        signal=signal,
        separation_range=separation_range,
        separation_step=separation_step,
        threshold_rel=threshold_rel,
        pca=pca,
        normalize_intensity=normalize_intensity,
        subtract_background=subtract_background)

    scale_x = signal.axes_manager[0].scale
    scale_y = signal.axes_manager[1].scale
    offset_x = signal.axes_manager[0].offset
    offset_y = signal.axes_manager[1].offset

    s = hs.stack([signal] * len(separation_list))
    s.axes_manager.navigation_axes[0].offset = separation_list[0]
    s.axes_manager.navigation_axes[0].scale = separation_step
    s.axes_manager.navigation_axes[0].name = "Feature separation, [Pixels]"
    s.axes_manager.navigation_axes[0].unit = "Pixels"

    max_peaks = 0
    for peaks in peak_list:
        if len(peaks) > max_peaks:
            max_peaks = len(peaks)
    if max_peaks == 0:
        raise ValueError(
            "No peaks found, try either reducing separation_range, or "
            "using a better image")

    marker_list_x = np.ones((len(peak_list), max_peaks)) * -100
    marker_list_y = np.ones((len(peak_list), max_peaks)) * -100

    for index, peaks in enumerate(peak_list):
        if len(peaks) != 0:
            marker_list_x[index,
                          0:len(peaks)] = (peaks[:, 0] * scale_x) + offset_x
            marker_list_y[index,
                          0:len(peaks)] = (peaks[:, 1] * scale_y) + offset_y

    marker_list = []
    for i in progressbar(range(marker_list_x.shape[1]),
                         disable=not show_progressbar):
        m = hs.markers.point(x=marker_list_x[:, i],
                             y=marker_list_y[:, i],
                             color='red')
        marker_list.append(m)

    s.add_marker(marker_list, permanent=True, plot_marker=False)
    return s
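Continuing the doctest from the docstring above, the returned stack is
navigated along the "Feature separation" axis, so inspecting it is just:

    s1.plot()   # scrub the navigator to compare the peaks found at each
                # separation and pick the value that isolates your features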
Example #27
0
    def estimate_shift1D(self,
                         start=None,
                         end=None,
                         reference_indices=None,
                         max_shift=None,
                         interpolate=True,
                         number_of_interpolation_points=5,
                         mask=None,
                         show_progressbar=None):
        """Estimate the shifts in the current signal axis using
         cross-correlation.
        This method can only estimate the shift by comparing
        unidimensional features that should not change the position in
        the signal axis. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient to
        select the feature of interest providing sensible values for
        `start` and `end`. By default interpolation is used to obtain
        subpixel precision.
        Parameters
        ----------
        start, end : {int | float | None}
            The limits of the interval. If int they are taken as the
            axis index. If float they are taken as the axis value.
        reference_indices : tuple of ints or None
            Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
            coordinates is used for this purpose.
        max_shift : int
            "Saturation limit" for the shift.
        interpolate : bool
            If True, interpolation is used to provide sub-pixel
            accuracy.
        number_of_interpolation_points : int
            Number of interpolation points. Warning: making this number
            too big can saturate the memory
        mask : BaseSignal of bool data type.
            It must have signal_dimension = 0 and navigation_shape equal to the
            current signal. Where mask is True the shift is not computed
            and is set to nan.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        Returns
        -------
        An array with the result of the estimation in the axis units.
        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        ip = number_of_interpolation_points + 1
        axis = self.axes_manager.signal_axes[0]
        self._check_navigation_mask(mask)
        if reference_indices is None:
            reference_indices = self.axes_manager.indices

        i1, i2 = axis._get_index(start), axis._get_index(end)
        shift_array = np.zeros(self.axes_manager._navigation_shape_in_array,
                               dtype=float)
        ref = self.inav[reference_indices].data[i1:i2]
        if interpolate is True:
            ref = interpolate1D(ip, ref)
        with progressbar(total=self.axes_manager.navigation_size,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i, (dat, indices) in enumerate(zip(
                    self._iterate_signal(),
                    self.axes_manager._array_indices_generator())):
                if mask is not None and bool(mask.data[indices]) is True:
                    shift_array[indices] = np.nan
                else:
                    dat = dat[i1:i2]
                    if interpolate is True:
                        dat = interpolate1D(ip, dat)
                    shift_array[indices] = np.argmax(
                        np.correlate(ref, dat, 'full')) - len(ref) + 1
                pbar.update(1)

        if max_shift is not None:
            if interpolate is True:
                max_shift *= ip
            shift_array.clip(-max_shift, max_shift, out=shift_array)
        if interpolate is True:
            shift_array /= ip
        shift_array *= axis.scale
        return shift_array
Example #28
0
def eelsdb(spectrum_type=None,
           title=None,
           author=None,
           element=None,
           formula=None,
           edge=None,
           min_energy=None,
           max_energy=None,
           resolution=None,
           min_energy_compare="gt",
           max_energy_compare="lt",
           resolution_compare="lt",
           max_n=-1,
           monochromated=None,
           order=None,
           order_direction="ASC",
           verify_certificate=True,
           show_progressbar=None):
    r"""Download spectra from the EELS Data Base.

    Parameters
    ----------
    spectrum_type: {'coreloss', 'lowloss', 'zeroloss', 'xrayabs'}, optional
    title: string
        Search spectra titles for a text string.
    author: string, optional
        Search authors for a text string.
    element: string or list of strings, optional
        Filter for the presence of one or more element. Each string must
        correspond with a valid element symbol.
    formula: string
        Chemical formula of the sample.
    edge: {'K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5'}, optional
        Filter for spectra with a specific class of edge.
    min_energy, max_energy: float, optional
        Minimum and maximum energy in eV.
    resolution: float, optional
        Energy resolution in eV.
    resolution_compare: {"lt", "eq", "gt"}, optional, default "lt"
        "lt" to search for all spectra with resolution less than `resolution`.
        "eq" for equal, "gt" for greater than.
    min_energy_compare, max_energy_compare: {"lt", "eq", "gt"}, optional
        "lt" to search for all spectra with min/max energy less than
        `min_energy`/`max_energy`. "eq" for equal, "gt" for greater than.
        Default values are "gt"/"lt" for `min_energy`/`max_energy`
        respectively.
    monochromated: bool or None (default)
    max_n: int, default -1
        Maximum number of spectra to return. -1 to return all.
    order: string
        Key to sort results by. Valid keys are:
        * "spectrumType",
        * "spectrumMin",
        * "spectrumMax",
        * "stepSize",
        * "spectrumFormula",
        * "spectrumElement",
        * "spectrumUpload",
        * "source_purity",
        * "spectrumEdges",
        * "microscope",
        * "guntype",
        * "beamenergy",
        * "resolution",
        * "monochromated",
        * "acquisition_mode",
        * "convergence",
        * "collection",
        * "probesize",
        * "beamcurrent",
        * "integratetime",
        * "readouts",
        * "detector",
        * "darkcurrent",
        * "gainvariation",
        * "calibration",
        * "zeroloss_deconv",
        * "thickness",
        * "deconv_fourier_log",
        * "deconv_fourier_ratio",
        * "deconv_stephens_deconvolution",
        * "deconv_richardson_lucy",
        * "deconv_maximum_entropy",
        * "deconv_other",
        * "assoc_spectra",
        * "ref_freetext",
        * "ref_doi",
        * "ref_url",
        * "ref_authors",
        * "ref_journal",
        * "ref_volume",
        * "ref_issue",
        * "ref_page",
        * "ref_year",
        * "ref_title",
        * "otherURLs"
    order_direction : {"ASC", "DESC"}
        Sorting `order` direction.
    verify_certificate: bool
        If True, verify the eelsdb website certificate and raise an error
        if it is invalid. If False, continue querying the database if the certificate
        is invalid. (This is a potential security risk.)
    %s

    Returns
    -------
    spectra: list
        A list containing all the spectra matching the given criteria if
        any.

    """
    # Verify arguments
    if spectrum_type is not None and spectrum_type not in {
            'coreloss', 'lowloss', 'zeroloss', 'xrayabs'
    }:
        raise ValueError(
            "spectrum_type must be one of \'coreloss\', \'lowloss\', "
            "\'zeroloss\', \'xrayabs\'.")
    valid_edges = [
        'K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5'
    ]
    valid_order_keys = [
        "spectrumType", "spectrumMin", "spectrumMax", "stepSize",
        "spectrumFormula", "spectrumElement", "spectrumUpload",
        "source_purity", "spectrumEdges", "microscope", "guntype",
        "beamenergy", "resolution", "monochromated", "acquisition_mode",
        "convergence", "collection", "probesize", "beamcurrent",
        "integratetime", "readouts", "detector", "darkcurrent",
        "gainvariation", "calibration", "zeroloss_deconv", "thickness",
        "deconv_fourier_log", "deconv_fourier_ratio",
        "deconv_stephens_deconvolution", "deconv_richardson_lucy",
        "deconv_maximum_entropy", "deconv_other", "assoc_spectra",
        "ref_freetext", "ref_doi", "ref_url", "ref_authors", "ref_journal",
        "ref_volume", "ref_issue", "ref_page", "ref_year", "ref_title",
        "otherURLs"
    ]
    if edge is not None and edge not in valid_edges:
        raise ValueError("`edge` must be one of %s." % ", ".join(valid_edges))

    if order is not None and order not in valid_order_keys:
        raise ValueError("`order` must be one of %s." %
                         ", ".join(valid_order_keys))
    if order_direction is not None and order_direction not in ["ASC", "DESC"]:
        raise ValueError("`order_direction` must be \"ASC\" or \"DESC\".")
    for kwarg, label in ((resolution_compare, "resolution_compare"),
                         (min_energy_compare, "min_energy_compare"),
                         (max_energy_compare, "max_energy_compare")):
        if kwarg not in ("lt", "gt", "eq"):
            raise ValueError("`%s` must be \"lt\", \"eq\" or \"gt\"." % label)
    if monochromated is not None:
        monochromated = 1 if monochromated else 0
    params = {
        "type": spectrum_type,
        "title": title,
        "author": author,
        "edge": edge,
        "min_energy": min_energy,
        "max_energy": max_energy,
        "resolution": resolution,
        "resolution_compare": resolution_compare,
        "monochromated": monochromated,
        "formula": formula,
        "min_energy_compare": min_energy_compare,
        "max_energy_compare": max_energy_compare,
        "per_page": max_n,
        "order": order,
        "order_direction": order_direction,
    }

    if isinstance(element, str):
        params["element"] = element
    else:
        params["element[]"] = element

    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar

    request = requests.get('http://api.eelsdb.eu/spectra',
                           params=params,
                           verify=verify_certificate)
    spectra = []
    jsons = request.json()
    if "message" in jsons:
        # Invalid query, EELSdb raises error.
        raise IOError(
            "Please report the following error to the HyperSpy developers: "
            f"{jsons['message']}.")

    for json_spectrum in progressbar(jsons, disable=not show_progressbar):
        download_link = json_spectrum['download_link']
        if download_link.split('.')[-1].lower() != 'msa':
            _logger.exception(
                "The source file is not a msa file, please report this error "
                "to http://eelsdb.eu/about with the following details:\n"
                f"Title: {json_spectrum['title']}\nid: {json_spectrum['id']}\n"
                f"Download link: {download_link}\n"
                f"Permalink: {json_spectrum['permalink']}")
            continue
        msa_string = requests.get(download_link,
                                  verify=verify_certificate).text
        try:
            s = dict2signal(parse_msa_string(msa_string)[0])
            emsa = s.original_metadata
            s._original_metadata = type(s.original_metadata)(
                {'json': json_spectrum})
            s.original_metadata.emsa = emsa
            spectra.append(s)

        except Exception:
            # parse_msa_string or dict2signal may fail if the EMSA file is not
            # a valid one.
            _logger.exception(
                "Failed to load the spectrum.\n"
                "Title: %s id: %s.\n"
                "Please report this error to http://eelsdb.eu/about \n" %
                (json_spectrum["title"], json_spectrum["id"]))

    if not spectra:
        _logger.info(
            "The EELS database does not contain any spectra matching your query"
            ". If you have some, why not submitting them "
            "https://eelsdb.eu/submit-data/ ?\n")
    else:
        # Add some info from json to metadata
        # Values with units are not yet supported by HyperSpy (v0.8) so
        # we can't map those fields.
        for s in spectra:
            if spectrum_type == "xrayabs":
                s.set_signal_type("XAS")
            json_md = s.original_metadata.json
            s.metadata.General.title = json_md.title
            if s.metadata.Signal.signal_type == "EELS":
                if json_md.get_item('elements'):
                    try:
                        # When 'No' is in the list of elements
                        # https://api.eelsdb.eu/spectra/zero-loss-c-feg-hitachi-disp-0-214-ev/
                        if json_md.elements[0].lower() != 'no':
                            s.add_elements(json_md.elements)
                    except ValueError:
                        _logger.exception(
                            "The following spectrum contains invalid chemical "
                            "element information:\n"
                            "Title: %s id: %s. Elements: %s.\n"
                            "Please report this error in "
                            "http://eelsdb.eu/about \n" %
                            (json_md.title, json_md.id, json_md.elements))
                if "collection" in json_md and " mrad" in json_md.collection:
                    beta = float(json_md.collection.replace(" mrad", ""))
                    s.metadata.set_item(
                        "Acquisition_instrument.TEM.Detector.EELS.collection_angle",
                        beta)
                if "convergence" in json_md and " mrad" in json_md.convergence:
                    alpha = float(json_md.convergence.replace(" mrad", ""))
                    s.metadata.set_item(
                        "Acquisition_instrument.TEM.convergence_angle", alpha)
                if "beamenergy" in json_md and " kV" in json_md.beamenergy:
                    beam_energy = float(json_md.beamenergy.replace(" kV", ""))
                    s.metadata.set_item(
                        "Acquisition_instrument.TEM.beam_energy", beam_energy)
            # We don't yet support units, so we cannot map the thickness
            # s.metadata.set_item("Sample.thickness", json_md.thickness)
            s.metadata.set_item("Sample.description", json_md.description)
            s.metadata.set_item("Sample.chemical_formula", json_md.formula)
            s.metadata.set_item("General.author", json_md.author.name)
            s.metadata.set_item("Acquisition_instrument.TEM.microscope",
                                json_md.microscope)

    return spectra
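A minimal usage sketch for `eelsdb` (this performs live HTTP requests against
the EELS Data Base, so it needs network access; argument values are
illustrative):

    spectra = eelsdb(spectrum_type='coreloss', element='Fe', edge='L2,3',
                     max_n=5)
    for s in spectra:
        print(s.metadata.General.title)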
Example #29
0
    def shift1D(self,
                shift_array,
                interpolation_method='linear',
                crop=True,
                expand=False,
                fill_value=np.nan,
                show_progressbar=None):
        """Shift the data in place over the signal axis by the amount specified
        by an array.
        Parameters
        ----------
        shift_array : numpy array
            An array containing the shifting amount. It must have
            `axes_manager._navigation_shape_in_array` shape.
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        crop : bool
            If True automatically crop the signal axis at both ends if
            needed.
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.
        Raises
        ------
        SignalDimensionError if the signal dimension is not 1.
        """
        if not np.any(shift_array):
            # Nothing to do: the shift array is filled with zeros
            return
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]

        # Figure out min/max shifts, and translate to shifts in index as well
        minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
        if minimum < 0:
            ihigh = 1 + axis.value2index(
                axis.high_value + minimum,
                rounding=math.floor)
        else:
            ihigh = axis.high_index + 1
        if maximum > 0:
            ilow = axis.value2index(axis.offset + maximum,
                                    rounding=math.ceil)
        else:
            ilow = axis.low_index
        if expand:
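            # Pad the signal axis so the full range of shifted data fits:
            # ihigh (derived from the most negative shift) and ilow (from
            # the most positive shift) set the pad widths computed below.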
            padding = []
            for i in range(self.data.ndim):
                if i == axis.index_in_array:
                    padding.append(
                        (axis.high_index - ihigh + 1, ilow - axis.low_index))
                else:
                    padding.append((0, 0))
            self.data = np.pad(self.data, padding, mode='constant',
                               constant_values=(fill_value,))
            axis.offset += minimum
            axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
        offset = axis.offset
        original_axis = axis.axis.copy()
        with progressbar(total=self.axes_manager.navigation_size,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i, (dat, shift) in enumerate(zip(
                    self._iterate_signal(),
                    shift_array.ravel())):
                if np.isnan(shift):
                    continue
                si = sp.interpolate.interp1d(original_axis,
                                             dat,
                                             bounds_error=False,
                                             fill_value=fill_value,
                                             kind=interpolation_method)
                axis.offset = float(offset - shift)
                dat[:] = si(axis.axis)
                pbar.update(1)

        axis.offset = offset

        if crop and not expand:
            self.crop(axis.index_in_axes_manager,
                      ilow,
                      ihigh)

        self.events.data_changed.trigger(obj=self)
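A usage sketch for the method above, assuming a HyperSpy Signal1D `s` with three navigation positions; the shift values are invented for illustration.

import numpy as np

# One shift per navigation position, in signal-axis units (invented values).
shifts = np.array([0.0, 0.5, -0.3])
# Shift in place, expanding the signal axis so no data is cropped away.
s.shift1D(shifts, expand=True, show_progressbar=False)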
Example #30
0
    def estimate_peak_width(self,
                            factor=0.5,
                            window=None,
                            return_interval=False,
                            show_progressbar=None):
        """Estimate the width of the highest intensity of peak
        of the spectra at a given fraction of its maximum.

        It can be used with asymmetric peaks. For accurate results any
        background must be previously substracted.
        The estimation is performed by interpolation using cubic splines.

        Parameters
        ----------
        factor : 0 < float < 1
            The default, 0.5, estimates the FWHM.
        window : None, float
            The size of the window centred at the peak maximum
            used to perform the estimation.
            The window size must be chosen with care: if it is narrower
            than the width of the peak at some positions or if it is
            so wide that it includes other more intense peaks this
            method cannot compute the width and a NaN is stored instead.
        return_interval : bool
            If True, returns 2 extra signals with the positions of the
            desired height fraction at the left and right of the
            peak.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.

        Returns
        -------
        width or [width, left, right], depending on the value of
        `return_interval`.

        """

        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        if not 0 < factor < 1:
            raise ValueError("factor must be between 0 and 1.")

        left, right = (self._get_navigation_signal(),
                       self._get_navigation_signal())
        # The signals must be of dtype float to contain np.nan
        left.change_dtype('float')
        right.change_dtype('float')
        axis = self.axes_manager.signal_axes[0]
        x = axis.axis
        maxval = self.axes_manager.navigation_size
        show_progressbar = show_progressbar and maxval > 0
        for i, spectrum in progressbar(enumerate(self),
                                       total=maxval,
                                       disable=not show_progressbar,
                                       leave=True):
            if window is not None:
                vmax = axis.index2value(spectrum.data.argmax())
                spectrum = spectrum.isig[vmax - window / 2.:vmax + window / 2.]
                x = spectrum.axes_manager[0].axis
            spline = scipy.interpolate.UnivariateSpline(
                x,
                spectrum.data - factor * spectrum.data.max(),
                s=0)
            roots = spline.roots()
            if len(roots) == 2:
                left.isig[self.axes_manager.indices] = roots[0]
                right.isig[self.axes_manager.indices] = roots[1]
            else:
                left.isig[self.axes_manager.indices] = np.nan
                right.isig[self.axes_manager.indices] = np.nan
        width = right - left
        if factor == 0.5:
            width.metadata.General.title = (
                self.metadata.General.title + " FWHM")
            left.metadata.General.title = (
                self.metadata.General.title + " FWHM left position")

            right.metadata.General.title = (
                self.metadata.General.title + " FWHM right position")
        else:
            width.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum" % factor)

            left.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum left position" % factor)
            right.metadata.General.title = (
                self.metadata.General.title +
                " full-width at %.1f maximum right position" % factor)
        if return_interval is True:
            return [width, left, right]
        else:
            return width
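A usage sketch for the method above, assuming a background-subtracted Signal1D `s`; the window size is invented for illustration.

# Estimate the FWHM (factor=0.5) within a 20-unit window centred on each
# spectrum's maximum; positions where the width cannot be computed get NaN.
width, left, right = s.estimate_peak_width(
    factor=0.5, window=20.0, return_interval=True, show_progressbar=False)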
Example #31
0
    def test_progressbar_shown(self):
        pbar = progressbar.progressbar(maxval=2, disabled=False)
        for i in range(2):
            pbar.update(i)
        pbar.finish()
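Note that this test exercises a legacy wrapper taking maxval and disabled, whereas the other examples here use the tqdm-style signature with total, disable and leave. A minimal sketch of the latter idiom, assuming the same progressbar import as in Examples #29 and #32:

# tqdm-style usage, mirroring Examples #29 and #32.
with progressbar(total=2, disable=False, leave=True) as pbar:
    for i in range(2):
        pbar.update(1)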
Example #32
0
def multifit(
    model,
    firstfit=False,
    bounded=False,
    fetch_only_fixed=False,
    show_progressbar=True,
    iterpath=None,
    **kwargs,
):
    '''
    A replica of the multi-dimensional fit function from hyperspy/model.py,
    heavily simplified, with several variable values assumed.

    Arguments:
    model -- The model to be fitted. It has to be passed explicitly because
             fit(), in this version, is not a member function of the Model
             class as it is in model.py.
    firstfit -- Flag indicating whether the call to this function pertains to
                the first level of the parent-child algorithm.
    bounded -- Flag indicating whether the fit performed should be bounded.
    fetch_only_fixed -- Flag indicating whether to fetch only fixed parameters.
    show_progressbar -- Flag indicating whether to show a progress bar.
    iterpath -- The path that the iteration through the 2D image should take,
                either "flyback" or "serpentine".

    Output:
    None
    '''

    # Setup progress bar and iteration path
    maxval = model.axes_manager.navigation_size
    show_progressbar = show_progressbar and (maxval > 0)
    model.axes_manager._iterpath = iterpath

    NavAxesSize = model.axes_manager.navigation_axes[0].size

    # Initialize and set inherited parameters.
    inherited_params = np.zeros(
        (NavAxesSize, NavAxesSize, len(model[0].free_parameters)))
    for index in model.axes_manager:
        for count, param in enumerate(model[0].free_parameters):
            inherited_params[index][count] = param.map["values"][index]

    # Main loop
    i = 0
    with model.axes_manager.events.indices_changed.suppress_callback(
            model.fetch_stored_values):
        # suspend_update is a context manager that pauses model (and plot)
        # updates while fitting; dummy_context_manager is a no-op stand-in
        # used for the inner, per-pixel scope.
        outer = model.suspend_update
        inner = dummy_context_manager

        with outer(update_on_resume=True):
            with progressbar(total=maxval,
                             disable=not show_progressbar,
                             leave=True) as pbar:
                # Original parameters: (1, 0, 0, 1, 0, 0). Used when the
                # function is called for the first level of the parent-child
                # algorithm.
                orig_params = \
                    tuple([param.value for param in model[0].free_parameters])
                for index in model.axes_manager:  # iterate through the pixels in the 2D image

                    with inner(update_on_resume=True):

                        model.fetch_stored_values(only_fixed=fetch_only_fixed)

                        # Conditional calls to either fit() or boundedfit()
                        if bounded:
                            if firstfit:
                                fit_results = boundedfit(model,
                                                         old_p1=orig_params,
                                                         **kwargs)
                            else:
                                fit_results = boundedfit(
                                    model,
                                    old_p1=inherited_params[index],
                                    **kwargs)
                        else:
                            if firstfit:
                                fit_results = fit(model,
                                                  old_p1=orig_params,
                                                  **kwargs)
                            else:
                                fit_results = fit(
                                    model,
                                    old_p1=inherited_params[index],
                                    **kwargs)

                        # Update the progress bar.
                        i += 1
                        pbar.update(1)