Example #1
    def get_reduced_intensity(self):
        """Obtains a reduced intensity profile from the radial profile.

        Returns
        -------
        ri : ReducedIntensity1D
            The reduced intensity profile, ri(s) = 2 * pi * s *
            (I(s) - background_fit) / normalisation.
        """

        s_scale = self.signal.axes_manager.signal_axes[0].scale
        s = np.arange(self.signal.axes_manager.signal_axes[0].size,
                      dtype='float64')
        s *= s_scale

        reduced_intensity = (2 * np.pi * s * np.divide(
            (self.signal.data - self.background_fit), self.normalisation))

        ri = ReducedIntensity1D(reduced_intensity)
        ri = transfer_navigation_axes(ri, self.signal)
        ri = transfer_signal_axes(ri, self.signal)

        return ri
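
The reduced intensity computed above is ri(s) = 2 * pi * s * (I(s) - background_fit) / normalisation. A minimal NumPy sketch of the same arithmetic on a synthetic profile, with hypothetical toy arrays standing in for the generator's signal, background_fit and normalisation attributes:

import numpy as np

# Toy 1D radial profile; s_scale plays the role of signal_axes[0].scale.
size, s_scale = 256, 0.01
s = np.arange(size, dtype='float64') * s_scale

intensity = np.exp(-s) * (1 + 0.05 * np.sin(40 * s))  # toy radial profile
background_fit = np.exp(-s)                           # toy fitted background
normalisation = np.exp(-s) + 1e-9                     # toy normalisation, avoids /0

reduced_intensity = 2 * np.pi * s * (intensity - background_fit) / normalisation
print(reduced_intensity.shape)  # (256,)
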
Example #2
    def rotate_strain_basis(self, x_new):
        """ Rotates a strain map to a new basis.

        Parameters
        ----------
        x_new : list
            The coordinates of a point on the new 'x' axis

        Returns
        -------
        StrainMap
            StrainMap in the new (rotated) basis.

        Notes
        -----
        Conventions are described in the class documentation.

        We follow the mathematical formalism described at
        https://www.continuummechanics.org/stressxforms.html (August 2019).
        """
        def apply_rotation(transposed_strain_map, R):
            """ Rotates a strain matrix to a new basis, for which R maps x_old to x_new """
            sigmaxx_old = transposed_strain_map[0]
            sigmayy_old = transposed_strain_map[1]
            sigmaxy_old = transposed_strain_map[2]

            z = np.asarray([[sigmaxx_old, sigmaxy_old],
                            [sigmaxy_old, sigmayy_old]])

            new = np.matmul(R.T, np.matmul(z, R))
            return [new[0, 0], new[1, 1], new[0, 1], transposed_strain_map[3]]

        def apply_rotation_complete(self, R):
            """ Mapping solution to return a (unclassed) strain map in a new basis """
            from hyperspy.api import transpose

            transposed = transpose(self)[0]
            transposed_to_new_basis = transposed.map(apply_rotation,
                                                     R=R,
                                                     inplace=False)
            return transposed_to_new_basis.T

        """ Core functionality """

        if self.current_basis_x != [1, 0]:
            # this takes us back to [1, 0] if our current map is in a different basis
            R = _get_rotation_matrix(self.current_basis_x).T
            strain_map_core = apply_rotation_complete(self, R)
        else:
            strain_map_core = self

        R = _get_rotation_matrix(x_new)
        transposed_to_new_basis = apply_rotation_complete(strain_map_core, R)
        meta_dict = self.metadata.as_dictionary()

        strainmap = StrainMap(transposed_to_new_basis,
                              current_basis_x=x_new,
                              metadata=meta_dict)
        return transfer_signal_axes(strainmap, self)
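
The helper _get_rotation_matrix is imported from elsewhere in the module. A plausible sketch, assuming it returns the standard 2D rotation matrix taking [1, 0] onto the direction of x_new, together with a quick check that the R.T @ z @ R transform used in apply_rotation behaves as expected:

import numpy as np

def _get_rotation_matrix(x_new):
    # Hypothetical reconstruction: rotation by the angle of x_new from [1, 0].
    theta = np.arctan2(x_new[1], x_new[0])
    return np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta),  np.cos(theta)]])

# A strain tensor with exx = 1 in the old basis ...
z = np.array([[1.0, 0.0],
              [0.0, 0.0]])
R = _get_rotation_matrix([0, 1])  # rotate the basis by 90 degrees

# ... becomes eyy = 1 in the new basis, matching apply_rotation above.
print(R.T @ z @ R)  # [[0. 0.] [0. 1.]]
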
Example #3
    def get_diffraction_variance(self, dqe, set_data_type=None):
        """Calculates the variance in scattered intensity as a function of
        scattering vector.

        Parameters
        ----------
        dqe : float
            Detective quantum efficiency of the detector for Poisson noise
            correction.
        set_data_type : numpy data type, optional
            For numpy data types, see
            https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html.
            Squaring the data to obtain meansq_dp produces values
            considerably larger than those in the original array, which can
            cause a silent overflow error. Casting the data to a larger
            data type avoids this.

        Returns
        -------
        dv : DiffractionVariance2D
            A DiffractionVariance2D object containing the mean DP, mean
            squared DP, variance DP and corrected variance DP.
        """

        dp = self.signal
        mean_dp = dp.mean((0, 1))
        if set_data_type is None:
            meansq_dp = Signal2D(np.square(dp.data)).mean((0, 1))
        else:
            meansq_dp = Signal2D(np.square(
                dp.data.astype(set_data_type))).mean((0, 1))

        normvar = (meansq_dp.data / np.square(mean_dp.data)) - 1.
        var_dp = Signal2D(normvar)
        corr_var_array = var_dp.data - (np.divide(dqe, mean_dp.data))
        corr_var_array[np.isinf(corr_var_array)] = 0
        corr_var_array[np.isnan(corr_var_array)] = 0
        corr_var = Signal2D(corr_var_array)
        vardps = stack((mean_dp, meansq_dp, var_dp, corr_var))
        sig_x = vardps.data.shape[1]
        sig_y = vardps.data.shape[2]

        dv = DiffractionVariance2D(vardps.data.reshape((2, 2, sig_x, sig_y)))

        dv = transfer_signal_axes(dv, self.signal)

        return dv
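
The quantity computed above is the normalized variance meansq_dp / mean_dp**2 - 1, with a Poisson-noise correction of dqe / mean_dp subtracted. A minimal plain-NumPy sketch of the same arithmetic on a synthetic 4D stack (toy data, navigation axes first):

import numpy as np

rng = np.random.default_rng(0)
dp = rng.poisson(lam=50, size=(4, 4, 8, 8)).astype('float64')  # toy 4x4 scan of 8x8 DPs
dqe = 1.0

mean_dp = np.mean(dp, axis=(0, 1))               # mean DP over the scan
meansq_dp = np.mean(np.square(dp), axis=(0, 1))  # mean squared DP

normvar = meansq_dp / np.square(mean_dp) - 1.0   # normalized variance
corr_var = normvar - dqe / mean_dp               # Poisson-noise correction
corr_var[~np.isfinite(corr_var)] = 0             # zero out inf/nan entries
print(corr_var.shape)  # (8, 8)
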
Example #4
    def get_vector_vdf_images(self, radius, normalize=False):
        """Obtain the intensity scattered to each diffraction vector at each
        navigation position in an ElectronDiffraction2D Signal by summation in a
        circular window of specified radius.

        Parameters
        ----------
        radius : float
            Radius of the integration window in reciprocal angstroms.

        normalize : bool
            If True, each VDF image is normalized so that its maximum
            intensity is 1.

        Returns
        -------
        vdfs : VDFImage
            VDFImage object containing virtual dark field images for all unique
            vectors.
        """
        if self.vectors:
            vdfs = []
            for v in self.vectors.data:
                disk = roi.CircleROI(cx=v[0], cy=v[1], r=radius, r_inner=0)
                vdf = disk(self.signal,
                           axes=self.signal.axes_manager.signal_axes)
                vdfs.append(vdf.sum((2, 3)).as_signal2D((0, 1)).data)

            vdfim = VDFImage(np.asarray(vdfs))

            if normalize:
                vdfim.map(normalize_vdf)

        else:
            raise ValueError(
                "DiffractionVectors not specified by user. Please "
                "initialize VDFGenerator with some vectors.")

        # Set calibration to same as signal
        vdfim = transfer_navigation_axes_to_signal_axes(vdfim, self.signal)

        # Assign vectors used to generate images to vdfim attribute.
        vdfim.vectors = self.vectors
        vdfim.vectors = transfer_signal_axes(vdfim.vectors, self.vectors)

        return vdfim
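
A usage sketch of the per-vector CircleROI integration performed in the loop above, run on toy data with hyperspy directly (the calibration and vector coordinates are illustrative; nansum guards against the nan padding CircleROI introduces outside the disk):

import numpy as np
import hyperspy.api as hs

signal = hs.signals.Signal2D(np.random.random((3, 3, 32, 32)))  # toy 4D data
for axis in signal.axes_manager.signal_axes:
    axis.scale = 0.1    # reciprocal-space calibration per pixel
    axis.offset = -1.6  # centre the pattern

vx, vy = 0.5, -0.3  # one hypothetical diffraction vector
disk = hs.roi.CircleROI(cx=vx, cy=vy, r=0.2, r_inner=0)
vdf = disk(signal, axes=signal.axes_manager.signal_axes)
vdf_image = vdf.nansum((2, 3)).as_signal2D((0, 1)).data  # one VDF image
print(vdf_image.shape)  # (3, 3)
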
Example #5
    def get_vdf_segments(
        self,
        min_distance=1,
        min_size=1,
        max_size=np.inf,
        max_number_of_grains=np.inf,
        marker_radius=1,
        threshold=False,
        exclude_border=False,
    ):
        """Separate segments from each of the VDF images using
        edge-detection by the Sobel transform and the watershed
        segmentation method implemented in scikit-image [1,2]. Obtain a
        VDFSegment, similar to VDFImage, but where each image is a
        segment of a VDF and the vectors correspond to each segment and
        are not necessarily unique.

        Parameters
        ----------
        min_distance : int
            Minimum distance (in pixels) between grains required for
            them to be considered as separate grains.
        min_size : float
            Grains with size (i.e. total number of pixels) below
            min_size are discarded.
        max_size : float
            Grains with size (i.e. total number of pixels) above
            max_size are discarded.
        max_number_of_grains : int
            Maximum number of grains included in the returned separated
            grains. If it is exceeded, those with the highest peak
            intensities are returned.
        marker_radius : float
            If 1 or larger, each marker for watershed is expanded to a disk
            of radius marker_radius. marker_radius should not exceed
            2*min_distance.
        threshold : bool
            If True, a mask is calculated by thresholding the VDF image
            by the Li threshold method in scikit-image. If False
            (default), the mask is the boolean VDF image.
        exclude_border : int or True, optional
            If a non-zero integer, peaks within a distance of
            exclude_border from the border will be discarded. If True,
            peaks at or closer than min_distance from the border will be
            discarded.

        References
        ----------
        [1] http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html
        [2] http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_coins_segmentation.html#sphx-glr-auto-examples-xx-applications-plot-coins-segmentation-py

        Returns
        -------
        vdfsegs : VDFSegment
            VDFSegment object containing segments (i.e. grains) of
            single virtual dark field images with corresponding vectors.
        """
        vdfs = self.copy()
        vectors = self.vectors.data

        # TODO : Add aperture radius as an attribute of VDFImage?

        # Create an array of length equal to the number of vectors, where
        # each element is an object array of shape (n, signal size x,
        # signal size y), n being the number of segments for that VDF image.
        vdfsegs = np.array(
            vdfs.map(
                separate_watershed,
                show_progressbar=True,
                inplace=False,
                min_distance=min_distance,
                min_size=min_size,
                max_size=max_size,
                max_number_of_grains=max_number_of_grains,
                marker_radius=marker_radius,
                threshold=threshold,
                exclude_border=exclude_border,
            ),
            dtype=object,
        )

        segments, vectors_of_segments = [], []
        for i, vector in enumerate(vectors):
            segments = np.append(segments, vdfsegs[i])
            num_segs = np.shape(vdfsegs[i])[0]
            vectors_of_segments = np.append(
                vectors_of_segments, np.broadcast_to(vector, (num_segs, 2))
            )

        vectors_of_segments = vectors_of_segments.reshape((-1, 2))
        segments = segments.reshape(
            (
                np.shape(vectors_of_segments)[0],
                vdfs.axes_manager.signal_shape[0],
                vdfs.axes_manager.signal_shape[1],
            )
        )
        # Calculate the total intensities of each segment
        segment_intensities = np.array(
            [[np.sum(x, axis=(0, 1))] for x in segments], dtype="object"
        )

        # if TraitError is raised, it is likely no segments were found
        segments = Signal2D(segments).transpose(navigation_axes=[0], signal_axes=[2, 1])
        # Create VDFSegment and transfer axes calibrations
        vdfsegs = VDFSegment(
            segments, DiffractionVectors(vectors_of_segments), segment_intensities
        )
        vdfsegs.segments = transfer_signal_axes(vdfsegs.segments, vdfs)
        n = vdfsegs.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"
        vdfsegs.vectors_of_segments.axes_manager.set_signal_dimension(1)
        vdfsegs.vectors_of_segments = transfer_signal_axes(
            vdfsegs.vectors_of_segments, self.vectors
        )
        n = vdfsegs.vectors_of_segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfsegs
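
separate_watershed is imported from the package's segment utilities. A condensed sketch of the Sobel-plus-watershed recipe cited in [1, 2], using scikit-image directly on one toy VDF image; the real helper additionally applies the min_size, max_size and max_number_of_grains filtering described above:

import numpy as np
from skimage.feature import peak_local_max
from skimage.filters import sobel, threshold_li
from skimage.segmentation import watershed

image = np.random.random((64, 64))  # toy VDF image
mask = image > threshold_li(image)  # the threshold=True branch
elevation = sobel(image)            # edge map used as watershed landscape

peaks = peak_local_max(image, min_distance=5, exclude_border=False)
markers = np.zeros_like(image, dtype=int)
markers[tuple(peaks.T)] = np.arange(len(peaks)) + 1  # one label per peak

labels = watershed(elevation, markers=markers, mask=mask)
segments = [image * (labels == n) for n in range(1, labels.max() + 1)]
print(len(segments))
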
Example #6
    def correlate_vdf_segments(self,
                               corr_threshold=0.7,
                               vector_threshold=4,
                               segment_threshold=3):
        """Iterates through VDF segments and sums those that are
        associated with the same segment. Summation will be done for
        those segments that have a normalised cross correlation above
        corr_threshold. The vectors of each segment sum will be updated
        accordingly, so that the vectors of each resulting segment sum
        are all the vectors of the original individual segments. Each
        vector is assigned an intensity that is the integrated intensity
        of the segment it originates from.

        Parameters
        ----------
        corr_threshold : float
            Segments will be summed if they have a normalized cross-
            correlation above corr_threshold. Must be between 0 and 1.
        vector_threshold : int, optional
            Correlated segments having a number of vectors less than
            vector_threshold will be discarded.
        segment_threshold : int, optional
            Correlated segment intensities that lie in a region where
            fewer than segment_threshold segments have been found are
            set to 0, i.e. the resulting segment will only have
            intensities above 0 where at least segment_threshold
            segments have intensities above 0.

        Returns
        -------
        vdfseg : VDFSegment
            The VDFSegment instance updated according to the image
            correlation results.
        """
        vectors = self.vectors_of_segments.data

        if segment_threshold > vector_threshold:
            raise ValueError("segment_threshold must be smaller than or "
                             "equal to vector_threshold.")

        segments = self.segments.data.copy()
        num_vectors = np.shape(vectors)[0]
        gvectors = np.array(np.empty(num_vectors, dtype=object))
        vector_indices = np.array(np.empty(num_vectors, dtype=object))

        for i in np.arange(num_vectors):
            gvectors[i] = np.array(vectors[i].copy())
            vector_indices[i] = np.array([i], dtype=int)

        correlated_segments = np.zeros_like(segments[:1])
        correlated_vectors = np.array([0.0], dtype=object)
        correlated_vectors[0] = np.array(np.zeros_like(vectors[:1]))
        correlated_vector_indices = np.array([0], dtype=object)
        correlated_vector_indices[0] = np.array([0])
        i = 0
        pbar = tqdm(total=np.shape(segments)[0])
        while np.shape(segments)[0] > i:
            # For each segment, calculate the normalized cross-correlation to
            # all other segments, and define add_indices for those with a value
            # above corr_threshold.
            corr_list = list(
                map(lambda x: norm_cross_corr(x, template=segments[i]),
                    segments))

            corr_add = list(map(lambda x: x > corr_threshold, corr_list))
            add_indices = np.where(corr_add)
            # If there are more add_indices than vector_threshold,
            # sum segments and add their vectors. Otherwise, discard segment.
            if (np.shape(add_indices[0])[0] >= vector_threshold
                    and np.shape(add_indices[0])[0] > 1):
                new_segment = np.array([np.sum(segments[add_indices], axis=0)])
                if segment_threshold > 1:
                    segment_check = np.zeros_like(segments[add_indices],
                                                  dtype=int)
                    segment_check[np.where(segments[add_indices])] = 1
                    segment_check = np.sum(segment_check, axis=0, dtype=int)
                    segment_mask = np.zeros_like(segments[0], dtype=bool)
                    segment_mask[np.where(
                        segment_check >= segment_threshold)] = 1
                    new_segment = new_segment * segment_mask
                correlated_segments = np.append(correlated_segments,
                                                new_segment,
                                                axis=0)
                add_indices = add_indices[0]
                new_vectors = np.array([0], dtype=object)
                new_vectors[0] = np.concatenate(gvectors[add_indices],
                                                axis=0).reshape(-1, 2)
                correlated_vectors = np.append(correlated_vectors,
                                               new_vectors,
                                               axis=0)
                new_indices = np.array([0], dtype=object)
                new_indices[0] = np.concatenate(vector_indices[add_indices],
                                                axis=0).reshape(-1, 1)
                correlated_vector_indices = np.append(
                    correlated_vector_indices, new_indices, axis=0)
            elif np.shape(add_indices[0])[0] >= vector_threshold:
                add_indices = add_indices[0]
                correlated_segments = np.append(correlated_segments,
                                                segments[add_indices],
                                                axis=0)
                correlated_vectors = np.append(correlated_vectors,
                                               gvectors[add_indices],
                                               axis=0)
                correlated_vector_indices = np.append(
                    correlated_vector_indices,
                    vector_indices[add_indices],
                    axis=0)
            else:
                add_indices = i
            segments = np.delete(segments, add_indices, axis=0)
            gvectors = np.delete(gvectors, add_indices, axis=0)
            vector_indices = np.delete(vector_indices, add_indices, axis=0)

        pbar.close()
        correlated_segments = np.delete(correlated_segments, 0, axis=0)
        correlated_vectors = np.delete(correlated_vectors, 0, axis=0)
        correlated_vector_indices = np.delete(correlated_vector_indices,
                                              0,
                                              axis=0)
        correlated_vector_intensities = np.array(
            np.empty(len(correlated_vectors)), dtype=object)

        # Sum the intensities in the original segments and assign those to the
        # correct vectors by referring to vector_indices.
        # If segment_mask has been used, use the segments as masks too.
        if segment_threshold > 1:
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                # Rebuild the mask from this correlated segment rather than
                # reusing segment_mask left over from the correlation loop.
                segment_mask = np.zeros_like(correlated_segments[i],
                                             dtype=bool)
                segment_mask[np.where(correlated_segments[i])] = 1
                segment_intensities = np.sum(self.segments.data * segment_mask,
                                             axis=(1, 2))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])
        else:
            segment_intensities = np.sum(self.segments.data, axis=(1, 2))
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])

        vdfseg = VDFSegment(
            Signal2D(correlated_segments),
            DiffractionVectors(correlated_vectors),
            correlated_vector_intensities,
        )

        # Transfer axes properties of segments
        vdfseg.segments = transfer_signal_axes(vdfseg.segments, self.segments)
        n = vdfseg.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfseg
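
norm_cross_corr is imported from the package's utils. A plausible sketch, assuming it computes the zero-normalized cross-correlation coefficient between two equally shaped images (1.0 for identical images; the loop above sums segments scoring higher than corr_threshold):

import numpy as np

def norm_cross_corr(image, template):
    # Hypothetical reconstruction: zero-normalized cross-correlation.
    a = image - image.mean()
    b = template - template.mean()
    denom = np.sqrt(np.sum(a * a) * np.sum(b * b))
    return np.sum(a * b) / denom if denom else 0.0

x = np.random.random((32, 32))
print(norm_cross_corr(x, x))      # 1.0
print(norm_cross_corr(x, 1 - x))  # -1.0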