Example #1
def test_vector_get_indexed_diffraction_vectors(overwrite, result_hkl,
                                                current_hkl, expected_hkl):
    match_results = VectorMatchingResults(np.array([[1], [2]]))
    match_results.hkls = result_hkl
    vectors = DiffractionVectors(np.array([[1], [2]]))
    vectors.hkls = current_hkl
    match_results.get_indexed_diffraction_vectors(vectors, overwrite)
    np.testing.assert_allclose(vectors.hkls, expected_hkl)
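A hedged sketch of the pytest parametrization that would feed this test; the decorator is not shown in the snippet above, and the hkl values below are hypothetical placeholders rather than the library's actual fixture data:

import numpy as np
import pytest

# Hypothetical parametrization for (overwrite, result_hkl, current_hkl, expected_hkl):
# with overwrite=True the match-result hkls would be expected to replace those already
# on the vectors; with overwrite=False the pre-existing hkls would be expected to be kept.
@pytest.mark.parametrize(
    "overwrite, result_hkl, current_hkl, expected_hkl",
    [
        (True, np.array([[0, 0, 1]]), None, np.array([[0, 0, 1]])),
        (False, np.array([[0, 0, 1]]), np.array([[0, 0, 0]]), np.array([[0, 0, 0]])),
    ],
)
def test_vector_get_indexed_diffraction_vectors(overwrite, result_hkl,
                                                current_hkl, expected_hkl):
    ...  # body as in Example #1 above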
Example #2
def test_vector_indexation_generator_init():
    vectors = DiffractionVectors([[1], [2]])
    vectors.cartesian = [[1], [2]]
    vector_library = DiffractionVectorLibrary()
    vector_indexation_generator = VectorIndexationGenerator(vectors, vector_library)
    assert isinstance(vector_indexation_generator, VectorIndexationGenerator)
    assert vector_indexation_generator.vectors == vectors
    assert vector_indexation_generator.library == vector_library
Example #3
def test_vector_get_indexed_diffraction_vectors_warn():
    match_results = VectorMatchingResults(np.array([[1], [2]]))
    match_results.hkls = [0, 0, 1]
    vectors = DiffractionVectors(np.array([[1], [2]]))
    vectors.hkls = [0, 0, 0]
    with pytest.warns(Warning):
        match_results.get_indexed_diffraction_vectors(vectors)
    np.testing.assert_allclose(vectors.hkls, [0, 0, 0])
Example #4
def test_vector_indexation_generator_index_vectors(vector_match_peaks,
                                                   vector_library):
    # vectors not used directly
    vectors = DiffractionVectors(np.array(vector_match_peaks[:, :2]))
    vectors.cartesian = DiffractionVectors(np.array(vector_match_peaks))
    gen = VectorIndexationGenerator(vectors, vector_library)
    indexation = gen.index_vectors(mag_tol=0.1,
                                   angle_tol=6,
                                   index_error_tol=0.3,
                                   n_peaks_to_index=2,
                                   n_best=5)

    # Values are tested directly on the match_vector in the util tests
    assert isinstance(indexation.vectors, DiffractionVectors)

    # (n_best=5, giving five orientation results)
    np.testing.assert_equal(indexation.data.shape, (5, ))

    # (hkls: 3 peaks, each with a 3-component hkl)
    np.testing.assert_equal(indexation.hkls.shape, (1, 3, 3))

    refined1 = gen.refine_n_best_orientations(indexation, 1.0, 1.0, n_best=0)

    assert isinstance(refined1.vectors, DiffractionVectors)
    np.testing.assert_equal(refined1.data.shape, (5, ))

    refined2 = gen.refine_best_orientation(indexation, 1.0, 1.0)

    assert isinstance(refined2.vectors, DiffractionVectors)
    np.testing.assert_equal(refined2.data.shape, (1, ))
    assert isinstance(refined2.data[0], OrientationResult)

    assert refined2.data[0].phase_index == indexation.data[0].phase_index
    assert refined2.data[0].match_rate == indexation.data[0].match_rate

    # Must use a large tolerance here, because there are only 3 vectors
    np.testing.assert_almost_equal(
        np.diag(refined1.data[0].rotation_matrix),
        np.diag(indexation.data[0].rotation_matrix),
        1,
    )
    np.testing.assert_almost_equal(
        np.diag(refined2.data[0].rotation_matrix),
        np.diag(indexation.data[0].rotation_matrix),
        1,
    )
Example #5
def test_vector_indexation_generator_cartesian_check():
    vectors = DiffractionVectors([[1], [2]])
    vector_library = DiffractionVectorLibrary()

    with pytest.raises(
        ValueError,
        match="Cartesian coordinates are required in order to index diffraction vectors",
    ):
        vector_indexation_generator = VectorIndexationGenerator(vectors, vector_library)
Example #6
def test_vector_indexation_generator_index_vectors(vector_match_peaks,
                                                   vector_library):
    # vectors not used directly
    vectors = DiffractionVectors(np.array(vector_match_peaks[:, :2]))
    vectors.cartesian = DiffractionVectors(np.array(vector_match_peaks))
    gen = VectorIndexationGenerator(vectors, vector_library)
    indexation = gen.index_vectors(mag_tol=0.1,
                                   angle_tol=6,
                                   index_error_tol=0.3,
                                   n_peaks_to_index=2,
                                   n_best=1)

    # Values are tested directly on the match_vector in the util tests
    assert isinstance(indexation.vectors, DiffractionVectors)
    # (n_best=1, 5 result values from each)
    np.testing.assert_equal(indexation.data.shape, (1, 5))
    # (n_best=1, 3 peaks each with a 3-component hkl)
    np.testing.assert_equal(indexation.hkls.shape, (1, 3, 3))
Example #7
    def test_out_of_range_vectors_DiffractionVectors(self):
        """Test that putting vectors that lie outside of the
        diffraction patterns raises a ValueError"""
        vectors = DiffractionVectors(np.array([[1, -100]]))
        dp = ElectronDiffraction2D(np.ones((20, 20)))

        with pytest.raises(
            ValueError,
            match="Some of your vectors do not lie within your diffraction pattern",
        ):
            sprg = SubpixelrefinementGenerator(dp, vectors)
Example #8
def get_vector_match_results(structure, rot_list, edc):
    diffraction_library = get_template_library(structure, rot_list, edc)
    peak_lists = []
    for pixel_coords in diffraction_library['A']['pixel_coords']:
        peak_lists.append(pixel_coords)
    peaks = DiffractionVectors((np.array([peak_lists, peak_lists]) - half_side_length) / half_side_length)
    peaks.axes_manager.set_signal_dimension(2)
    peaks.calculate_cartesian_coordinates(200, 0.2)
    peaks.cartesian.axes_manager.set_signal_dimension(2)
    structure_library = StructureLibrary(['A'], [structure], [[]])
    library_generator = VectorLibraryGenerator(structure_library)
    vector_library = library_generator.get_vector_library(1)
    indexation_generator = VectorIndexationGenerator(peaks, vector_library)
    indexation = indexation_generator.index_vectors(
        mag_tol=1.5 / half_side_length,
        angle_tol=1,
        index_error_tol=0.2,
        n_peaks_to_index=5,
        n_best=2)
    return diffraction_library, indexation
Example #9
def test_vdf_generator_from_map(diffraction_pattern):
    dvm = DiffractionVectors(
        np.array(
            [[np.array([[1, 1], [2, 2]]),
              np.array([[1, 1], [2, 2], [1, 2]])],
             [np.array([[1, 1], [2, 2]]),
              np.array([[1, 1], [2, 2]])]],
            dtype=object))
    dvm.axes_manager.set_signal_dimension(0)

    vdfgen = VDFGenerator(diffraction_pattern, dvm)
    assert isinstance(vdfgen, VDFGenerator)
Example #10
    def test_vdf_generator_init_with_vectors(self, diffraction_pattern):
        dvm = DiffractionVectors(
            np.array([[
                np.array([[1, 1], [2, 2]]),
                np.array([[1, 1], [2, 2], [1, 2]])
            ], [np.array([[1, 1], [2, 2]]),
                np.array([[1, 1], [2, 2]])]],
                     dtype=object))
        dvm.axes_manager.set_signal_dimension(0)

        vdfgen = VDFGenerator(diffraction_pattern, dvm)
        assert isinstance(vdfgen.signal, ElectronDiffraction2D)
        assert isinstance(vdfgen.vectors, DiffractionVectors)
Example #11
    def test_wrong_navigation_dimensions(self):
        """Tests that navigation dimensions must be appropriate too."""
        dp = ElectronDiffraction2D(np.zeros((2, 2, 8, 8)))
        vectors = DiffractionVectors(np.zeros((1, 2)))
        dp.axes_manager.set_signal_dimension(2)
        vectors.axes_manager.set_signal_dimension(0)

        # Note - uses regex via re.search()
        with pytest.raises(
            ValueError,
            match=r"Vectors with shape .* must have the same navigation shape as .*",
        ):
            sprg = SubpixelrefinementGenerator(dp, vectors)
Example #12
def test_integration_generator(radius, offset):
    pixel_positions = np.array([[0, 0], [15, -15], [-15, 15]])
    pattern = np.zeros((50, 50))
    center = np.array(pattern.shape) / 2
    i, j = (pixel_positions + center + offset).T.astype(int)
    pattern[i, j] = 1

    dv = DiffractionVectors(pixel_positions)
    dp = ElectronDiffraction2D(pattern)
    ig = IntegrationGenerator(dp, dv)
    assert isinstance(ig, IntegrationGenerator)

    inties = ig.extract_intensities(radius=radius)
    assert isinstance(inties, BaseSignal)

    assert np.allclose(inties.data, [1, 1, 1])
Example #13
    def conventional_xc(self, square_size, disc_radius, upsample_factor):
        """Refines the peaks using (phase) cross correlation.

        Parameters
        ----------
        square_size : int
            Length (in pixels) of one side of a square that contains the peak
            to be refined.
        disc_radius : int
            Radius (in pixels) of the discs that you seek to refine.
        upsample_factor : int
            Factor by which to upsample the patterns.

        Returns
        -------
        vector_out : DiffractionVectors
            DiffractionVectors containing the refined vectors in calibrated
            units with the same navigation shape as the diffraction patterns.

        """
        def _conventional_xc_map(dp, vectors, sim_disc, upsample_factor,
                                 center, calibration):
            shifts = np.zeros_like(vectors, dtype=np.float64)
            for i, vector in enumerate(vectors):
                expt_disc = get_experimental_square(dp, vector, square_size)
                shifts[i] = _conventional_xc(expt_disc, sim_disc,
                                             upsample_factor)
            return (((vectors + shifts) - center) * calibration)

        sim_disc = get_simulated_disc(square_size, disc_radius)
        self.vectors_out = DiffractionVectors(
            self.dp.map(_conventional_xc_map,
                        vectors=self.vector_pixels,
                        sim_disc=sim_disc,
                        upsample_factor=upsample_factor,
                        center=self.center,
                        calibration=self.calibration,
                        inplace=False))
        self.vectors_out.axes_manager.set_signal_dimension(0)
        self.last_method = "conventional_xc"
        return self.vectors_out
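A minimal usage sketch of the refinement above, assuming the pyxem classes are imported as in the surrounding examples; the data and parameter values are placeholders, not prescribed settings:

import numpy as np

# Placeholder data: a single 128x128 pattern and two peak positions.
dp = ElectronDiffraction2D(np.ones((128, 128)))
vectors = DiffractionVectors(np.array([[10, 10], [-20, 5]]))

spr_gen = SubpixelrefinementGenerator(dp, vectors)
refined = spr_gen.conventional_xc(square_size=10,      # side of the square cut around each peak, in pixels
                                  disc_radius=4,       # radius of the simulated reference disc, in pixels
                                  upsample_factor=10)  # upsampling used by the cross-correlation
# `refined` is a DiffractionVectors signal holding the refined positions in calibrated units.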
Example #14
def test_integration_generator_summation_method():
    pixel_positions = np.array([[0, 0], [25, -25], [-25, 25]])
    pattern = np.zeros((100, 100))
    center = np.array(pattern.shape) / 2
    i, j = (pixel_positions + center).T.astype(int)
    pattern[i, j] = 1.0
    pattern = gaussian_filter(pattern, 2)

    dv = DiffractionVectors(pixel_positions)
    dp = ElectronDiffraction2D(pattern)
    ig = IntegrationGenerator(dp, dv)

    assert isinstance(ig, IntegrationGenerator)

    vectors = ig.extract_intensities_summation_method()

    assert np.allclose(pixel_positions, vectors.data, atol=0.05)
    assert np.allclose(vectors.data, pixel_positions, atol=0.05)
    assert np.allclose(vectors.intensities.data[0], 1.0, atol=0.05)
    assert np.allclose(vectors.sigma.data[0], 0.0, atol=0.05)
    assert isinstance(vectors, DiffractionVectors)
Example #15
def diffraction_vectors_single(request):
    dvs = DiffractionVectors(request.param)
    dvs.axes_manager.set_signal_dimension(1)
    return dvs
Example #16
    def get_vdf_segments(
        self,
        min_distance=1,
        min_size=1,
        max_size=np.inf,
        max_number_of_grains=np.inf,
        marker_radius=1,
        threshold=False,
        exclude_border=False,
    ):
        """Separate segments from each of the VDF images using
        edge-detection by the Sobel transform and the watershed
        segmentation method implemented in scikit-image [1,2]. Obtain a
        VDFSegment, similar to VDFImage, but where each image is a
        segment of a VDF and the vectors correspond to each segment and
        are not necessarily unique.

        Parameters
        ----------
        min_distance: int
            Minimum distance (in pixels) between grains required for
            them to be considered as separate grains.
        min_size : float
            Grains with size (i.e. total number of pixels) below
            min_size are discarded.
        max_size : float
            Grains with size (i.e. total number of pixels) above
            max_size are discarded.
        max_number_of_grains : int
            Maximum number of grains included in the returned separated
            grains. If it is exceeded, those with highest peak
            intensities will be returned.
        marker_radius : float
            If 1 or larger, each marker for watershed is expanded to a disk
            of radius marker_radius. marker_radius should not exceed
            2*min_distance.
        threshold: bool
            If True, a mask is calculated by thresholding the VDF image
            by the Li threshold method in scikit-image. If False
            (default), the mask is the boolean VDF image.
        exclude_border : int or True, optional
            If non-zero integer, peaks within a distance of
            exclude_border from the border will be discarded. If True,
            peaks at or closer than min_distance from the border will be
            discarded.

        References
        ----------
        [1] http://scikit-image.org/docs/dev/auto_examples/segmentation/
            plot_watershed.html
        [2] http://scikit-image.org/docs/dev/auto_examples/xx_applications/
            plot_coins_segmentation.html#sphx-glr-auto-examples-xx-
            applications-plot-coins-segmentation-py

        Returns
        -------
        vdfsegs : VDFSegment
            VDFSegment object containing segments (i.e. grains) of
            single virtual dark field images with corresponding vectors.
        """
        vdfs = self.copy()
        vectors = self.vectors.data

        # TODO : Add aperture radius as an attribute of VDFImage?

        # Create an array of length equal to the number of vectors where each
        # element is a np.object with shape (n: number of segments for this
        # VDFImage, VDFImage size x, VDFImage size y).
        vdfsegs = np.array(
            vdfs.map(
                separate_watershed,
                show_progressbar=True,
                inplace=False,
                min_distance=min_distance,
                min_size=min_size,
                max_size=max_size,
                max_number_of_grains=max_number_of_grains,
                marker_radius=marker_radius,
                threshold=threshold,
                exclude_border=exclude_border,
            ),
            dtype=object,
        )

        segments, vectors_of_segments = [], []
        for i, vector in zip(np.arange(vectors.size), vectors):
            segments = np.append(segments, vdfsegs[i])
            num_segs = np.shape(vdfsegs[i])[0]
            vectors_of_segments = np.append(
                vectors_of_segments, np.broadcast_to(vector, (num_segs, 2))
            )

        vectors_of_segments = vectors_of_segments.reshape((-1, 2))
        segments = segments.reshape(
            (
                np.shape(vectors_of_segments)[0],
                vdfs.axes_manager.signal_shape[0],
                vdfs.axes_manager.signal_shape[1],
            )
        )
        # Calculate the total intensities of each segment
        segment_intensities = np.array(
            [[np.sum(x, axis=(0, 1))] for x in segments], dtype="object"
        )

        # if TraitError is raised, it is likely no segments were found
        segments = Signal2D(segments).transpose(navigation_axes=[0], signal_axes=[2, 1])
        # Create VDFSegment and transfer axes calibrations
        vdfsegs = VDFSegment(
            segments, DiffractionVectors(vectors_of_segments), segment_intensities
        )
        vdfsegs.segments = transfer_signal_axes(vdfsegs.segments, vdfs)
        n = vdfsegs.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"
        vdfsegs.vectors_of_segments.axes_manager.set_signal_dimension(1)
        vdfsegs.vectors_of_segments = transfer_signal_axes(
            vdfsegs.vectors_of_segments, self.vectors
        )
        n = vdfsegs.vectors_of_segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfsegs
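A brief usage sketch, assuming `vdf_images` is the VDF image signal this method belongs to; the variable name and parameter values are illustrative only:

# Hypothetical VDF image signal, e.g. one virtual dark-field image per diffraction vector.
segments = vdf_images.get_vdf_segments(min_distance=2,    # minimum pixel separation between grains
                                       min_size=10,       # discard grains smaller than 10 pixels
                                       marker_radius=2,   # expand each watershed marker to a disc
                                       threshold=True)    # build the mask with Li thresholding
# `segments` is a VDFSegment; its .segments and .vectors_of_segments carry the parent calibrations.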
Example #17
 def test_out_of_range_vectors_DiffractionVectors(self):
     vectors = DiffractionVectors(np.array([[1, -100]]))
     dp = ElectronDiffraction2D(np.ones((20, 20)))
     sprg = SubpixelrefinementGenerator(dp, vectors)
Example #18
    def local_gaussian_method(self, square_size):
        """ Refinement based on the mathematics of a local maxima on a
        continious region, using the (discrete) maxima pixel as a starting point.
        See Notes.

        Parameters
        ----------
        square_size : int
            Length (in pixels) of one side of a square that contains the peak to
            be refined.

        Returns
        -------
        vector_out : DiffractionVectors
            DiffractionVectors containing the refined vectors in calibrated
            units with the same navigation shape as the diffraction patterns.

        Notes
        -----
        This method works by first locating the maximum intensity value within the square.
        The four adjacent pixels are then considered and used to form two independent
        quadratic equations. Solving these gives the x_center and y_center coordinates,
        which are then returned.
        """

        def _new_lg_idea(z):
            """ Internal function providing the algebra for the local_gaussian_method,
            see docstring of that function for details

            Parameters
            ----------
            z : np.array
                subsquare containing the peak to be localised

            Returns
            -------
            (x,y) : tuple
                Containing subpixel resolved values for the center
            """
            si = np.unravel_index(np.argmax(z), z.shape)
            z_ref = z[si[0] - 1:si[0] + 2, si[1] - 1:si[1] + 2]
            if z_ref.shape != (3, 3):
                return (si[1] - z.shape[1] // 2, si[0] - z.shape[0] // 2)
            M = z_ref[1, 1]
            LX, RX = z_ref[1, 0], z_ref[1, 2]
            UY, DY = z_ref[0, 1], z_ref[2, 1]
            x_ans = 0.5 * (LX - RX) / (LX + RX - 2 * M)
            y_ans = 0.5 * (UY - DY) / (UY + DY - 2 * M)
            return (si[1] - z.shape[1] // 2 + x_ans, si[0] - z.shape[0] // 2 + y_ans)

        def _lg_map(dp, vectors, square_size, center, calibration):
            shifts = np.zeros_like(vectors, dtype=np.float64)
            for i, vector in enumerate(vectors):
                expt_disc = get_experimental_square(dp, vector, square_size)
                shifts[i] = _new_lg_idea(expt_disc)

            return (((vectors + shifts) - center) * calibration)

        self.vectors_out = DiffractionVectors(self.dp.map(_lg_map,
                                                          vectors=self.vector_pixels,
                                                          square_size=square_size,
                                                          center=self.center,
                                                          calibration=self.calibration,
                                                          inplace=False))

        # check for unrefined peaks
        def check_bad_square(z):
            si = np.unravel_index(np.argmax(z), z.shape)
            z_ref = z[si[0] - 1:si[0] + 2, si[1] - 1:si[1] + 2]
            if z_ref.shape == (3, 3):
                return False
            else:
                return True

        def _check_bad_square_map(dp, vectors, square_size):
            bad_square = False
            for i, vector in enumerate(vectors):
                expt_disc = get_experimental_square(dp, vector, square_size)
                bad_square = check_bad_square(expt_disc)
                if bad_square:
                    return True
            return False

        bad_squares = self.dp.map(_check_bad_square_map,
                                  vectors=self.vector_pixels,
                                  square_size=square_size,
                                  inplace=False)

        if np.any(bad_squares):
            warnings.warn("You have a peak in your pattern that lies on the edge of the square. \
                          Consider increasing the square size")

        self.vectors_out.axes_manager.set_signal_dimension(0)
        self.last_method = "lg_method"
        return self.vectors_out
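To make the Notes above concrete, here is a standalone sketch of the one-dimensional quadratic vertex estimate used inside _new_lg_idea, on synthetic values (illustrative, not library code):

import numpy as np

def quadratic_vertex_offset(left, centre, right):
    # Vertex of the parabola through three equally spaced samples,
    # returned as an offset in pixels from the central sample.
    return 0.5 * (left - right) / (left + right - 2 * centre)

# A peak sampled just to the right of the central pixel: the right neighbour is brighter.
print(quadratic_vertex_offset(0.6, 1.0, 0.8))  # ~ +0.17 px towards the brighter side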
Example #19
    def find_peaks(self, method, *args, **kwargs):
        """Find the position of diffraction peaks.

        Function to locate the positive peaks in an image using various
        user-specified methods. Returns a structured array containing the peak
        positions.

        Parameters
        ----------
        method : str
            Select peak finding algorithm to implement. Available methods are
            {'zaefferer', 'stat', 'laplacian_of_gaussians',
            'difference_of_gaussians', 'xc'}
        *args : arguments
            Arguments to be passed to the peak finders.
        **kwargs : arguments
            Keyword arguments to be passed to the peak finders.

        Returns
        -------
        peaks : DiffractionVectors
            A DiffractionVectors object with navigation dimensions identical to
            the original ElectronDiffraction2D object. Each signal is a BaseSignal
            object containing the diffraction vectors found at each navigation
            position, in calibrated units.

        Notes
        -----
        Peak finding methods are detailed as:

            * 'zaefferer' - based on gradient thresholding and refinement
              by local region of interest optimisation
            * 'stat' - statistical approach requiring no free params.
            * 'laplacian_of_gaussians' - a blob finder implemented in
              `scikit-image` which uses the laplacian of Gaussian matrices
              approach.
            * 'difference_of_gaussians' - a blob finder implemented in
              `scikit-image` which uses the difference of Gaussian matrices
              approach.
            * 'xc' - A cross correlation peakfinder

        """
        method_dict = {
            'zaefferer': find_peaks_zaefferer,
            'stat': find_peaks_stat,
            'laplacian_of_gaussians': find_peaks_log,
            'difference_of_gaussians': find_peaks_dog,
            'xc': find_peaks_xc
        }
        if method in method_dict:
            method = method_dict[method]
        else:
            raise NotImplementedError("The method `{}` is not implemented. "
                                      "See documentation for available "
                                      "implementations.".format(method))

        peaks = self.map(method, *args, **kwargs, inplace=False, ragged=True)
        peaks.map(peaks_as_gvectors,
                  center=np.array(self.axes_manager.signal_shape) / 2 - 0.5,
                  calibration=self.axes_manager.signal_axes[0].scale)
        peaks = DiffractionVectors(peaks)
        peaks.axes_manager.set_signal_dimension(0)

        # Set DiffractionVectors attributes
        peaks.pixel_calibration = self.axes_manager.signal_axes[0].scale
        peaks.detector_shape = self.axes_manager.signal_shape

        # Set calibration to same as signal
        x = peaks.axes_manager.navigation_axes[0]
        y = peaks.axes_manager.navigation_axes[1]

        x.name = 'x'
        x.scale = self.axes_manager.navigation_axes[0].scale
        x.units = 'nm'

        y.name = 'y'
        y.scale = self.axes_manager.navigation_axes[1].scale
        y.units = 'nm'

        return peaks
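A hedged usage sketch of the peak finder above; the scan data is synthetic and 'zaefferer' is chosen only as one of the methods listed in the docstring:

import numpy as np

# Synthetic 2x2 scan of 64x64 diffraction patterns.
dp = ElectronDiffraction2D(np.random.random((2, 2, 64, 64)))
dp.axes_manager.signal_axes[0].scale = 0.01  # reciprocal-space calibration read by find_peaks
dp.axes_manager.signal_axes[1].scale = 0.01

peaks = dp.find_peaks(method='zaefferer')
# `peaks` is a DiffractionVectors signal with the same navigation shape as `dp`,
# holding the calibrated g-vectors found at each scan position.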
Example #20
    def correlate_vdf_segments(self,
                               corr_threshold=0.7,
                               vector_threshold=4,
                               segment_threshold=3):
        """Iterates through VDF segments and sums those that are
        associated with the same segment. Summation will be done for
        those segments that have a normalised cross correlation above
        corr_threshold. The vectors of each segment sum will be updated
        accordingly, so that the vectors of each resulting segment sum
        are all the vectors of the original individual segments. Each
        vector is assigned an intensity that is the integrated intensity
        of the segment it originates from.

        Parameters
        ----------
        corr_threshold : float
            Segments will be summed if they have a normalized cross-
            correlation above corr_threshold. Must be between 0 and 1.
        vector_threshold : int, optional
            Correlated segments having a number of vectors less than
            vector_threshold will be discarded.
        segment_threshold : int, optional
            Correlated segment intensities that lie in a region where fewer
            than segment_threshold segments have been found are set to 0,
            i.e. the resulting segment will only have intensities above 0
            where at least segment_threshold segments have intensities
            above 0.

        Returns
        -------
        vdfseg : VDFSegment
            The VDFSegment instance updated according to the image
            correlation results.
        """
        vectors = self.vectors_of_segments.data

        if segment_threshold > vector_threshold:
            raise ValueError("segment_threshold must be smaller than or "
                             "equal to vector_threshold.")

        segments = self.segments.data.copy()
        num_vectors = np.shape(vectors)[0]
        gvectors = np.array(np.empty(num_vectors, dtype=object))
        vector_indices = np.array(np.empty(num_vectors, dtype=object))

        for i in np.arange(num_vectors):
            gvectors[i] = np.array(vectors[i].copy())
            vector_indices[i] = np.array([i], dtype=int)

        correlated_segments = np.zeros_like(segments[:1])
        correlated_vectors = np.array([0.0], dtype=object)
        correlated_vectors[0] = np.array(np.zeros_like(vectors[:1]))
        correlated_vector_indices = np.array([0], dtype=object)
        correlated_vector_indices[0] = np.array([0])
        i = 0
        pbar = tqdm(total=np.shape(segments)[0])
        while np.shape(segments)[0] > i:
            # For each segment, calculate the normalized cross-correlation to
            # all other segments, and define add_indices for those with a value
            # above corr_threshold.
            corr_list = list(
                map(lambda x: norm_cross_corr(x, template=segments[i]),
                    segments))

            corr_add = list(map(lambda x: x > corr_threshold, corr_list))
            add_indices = np.where(corr_add)
            # If there are more add_indices than vector_threshold,
            # sum segments and add their vectors. Otherwise, discard segment.
            if (np.shape(add_indices[0])[0] >= vector_threshold
                    and np.shape(add_indices[0])[0] > 1):
                new_segment = np.array([np.sum(segments[add_indices], axis=0)])
                if segment_threshold > 1:
                    segment_check = np.zeros_like(segments[add_indices],
                                                  dtype=int)
                    segment_check[np.where(segments[add_indices])] = 1
                    segment_check = np.sum(segment_check, axis=0, dtype=int)
                    segment_mask = np.zeros_like(segments[0], dtype=bool)
                    segment_mask[np.where(
                        segment_check >= segment_threshold)] = 1
                    new_segment = new_segment * segment_mask
                correlated_segments = np.append(correlated_segments,
                                                new_segment,
                                                axis=0)
                add_indices = add_indices[0]
                new_vectors = np.array([0], dtype=object)
                new_vectors[0] = np.concatenate(gvectors[add_indices],
                                                axis=0).reshape(-1, 2)
                correlated_vectors = np.append(correlated_vectors,
                                               new_vectors,
                                               axis=0)
                new_indices = np.array([0], dtype=object)
                new_indices[0] = np.concatenate(vector_indices[add_indices],
                                                axis=0).reshape(-1, 1)
                correlated_vector_indices = np.append(
                    correlated_vector_indices, new_indices, axis=0)
            elif np.shape(add_indices[0])[0] >= vector_threshold:
                add_indices = add_indices[0]
                correlated_segments = np.append(correlated_segments,
                                                segments[add_indices],
                                                axis=0)
                correlated_vectors = np.append(correlated_vectors,
                                               gvectors[add_indices],
                                               axis=0)
                correlated_vector_indices = np.append(
                    correlated_vector_indices,
                    vector_indices[add_indices],
                    axis=0)
            else:
                add_indices = i
            segments = np.delete(segments, add_indices, axis=0)
            gvectors = np.delete(gvectors, add_indices, axis=0)
            vector_indices = np.delete(vector_indices, add_indices, axis=0)

        pbar.close()
        correlated_segments = np.delete(correlated_segments, 0, axis=0)
        correlated_vectors = np.delete(correlated_vectors, 0, axis=0)
        correlated_vector_indices = np.delete(correlated_vector_indices,
                                              0,
                                              axis=0)
        correlated_vector_intensities = np.array(np.empty(
            len(correlated_vectors)),
                                                 dtype=object)

        # Sum the intensities in the original segments and assign those to the
        # correct vectors by referring to vector_indices.
        # If segment_mask has been used, use the segments as masks too.
        if segment_threshold > 1:
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                segment_mask = np.zeros_like(segment_mask)
                segment_mask[np.where(correlated_segments[i])] = 1
                segment_intensities = np.sum(self.segments.data * segment_mask,
                                             axis=(1, 2))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])
        else:
            segment_intensities = np.sum(self.segments.data, axis=(1, 2))
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])

        vdfseg = VDFSegment(
            Signal2D(correlated_segments),
            DiffractionVectors(correlated_vectors),
            correlated_vector_intensities,
        )

        # Transfer axes properties of segments
        vdfseg.segments = transfer_signal_axes(vdfseg.segments, self.segments)
        n = vdfseg.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfseg
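Assuming `vdf_segments` is a VDFSegment such as the one returned by get_vdf_segments in Example #16, a minimal usage sketch (parameter values are illustrative):

# Sum segments whose normalized cross-correlation exceeds 0.7, keeping only sums backed
# by at least 4 vectors and masking pixels seen in fewer than 3 contributing segments.
merged = vdf_segments.correlate_vdf_segments(corr_threshold=0.7,
                                             vector_threshold=4,
                                             segment_threshold=3)
# `merged.segments` and `merged.vectors_of_segments` describe the combined grains,
# with one integrated intensity per contributing vector.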
Example #21
    def center_of_mass_method(self, square_size):
        """Find the subpixel refinement of a peak by assuming it lies at the
        center of intensity.

        Parameters
        ----------
        square_size : int
            Length (in pixels) of one side of a square that contains the peak to
            be refined.

        Returns
        -------
        vector_out : DiffractionVectors
            DiffractionVectors containing the refined vectors in calibrated
            units with the same navigation shape as the diffraction patterns.

        """

        def _center_of_mass_hs(z):
            """Return the center of mass of an array with coordinates in the
            hyperspy convention

            Parameters
            ----------
            z : np.array

            Returns
            -------
            (x,y) : tuple of floats
                The x and y locations of the center of mass of the passed square
            """

            s = np.sum(z)
            if s != 0:
                z *= 1 / s
            dx = np.sum(z, axis=0)
            dy = np.sum(z, axis=1)
            h, w = z.shape
            cx = np.sum(dx * np.arange(w))
            cy = np.sum(dy * np.arange(h))
            return cx, cy

        def _com_experimental_square(z, vector, square_size):
            """Wrapper for get_experimental_square that makes the non-zero
            elements symmetrical around the 'unsubpixeled' peak by zeroing a
            'spare' row and column (top and left).

            Parameters
            ----------
            z : np.array

            vector : np.array([x,y])

            square_size : int (even)

            Returns
            -------
            z_adpt : np.array
                z, but with row and column zero set to 0
            """
            # Copy to make sure we don't change the dp
            z_adpt = np.copy(get_experimental_square(z, vector=vector, square_size=square_size))
            z_adpt[:, 0] = 0
            z_adpt[0, :] = 0
            return z_adpt

        def _center_of_mass_map(dp, vectors, square_size, center, calibration):
            shifts = np.zeros_like(vectors, dtype=np.float64)
            for i, vector in enumerate(vectors):
                expt_disc = _com_experimental_square(dp, vector, square_size)
                shifts[i] = [a - square_size / 2 for a in _center_of_mass_hs(expt_disc)]
            return ((vectors + shifts) - center) * calibration

        self.vectors_out = DiffractionVectors(
            self.dp.map(_center_of_mass_map,
                        vectors=self.vector_pixels,
                        square_size=square_size,
                        center=self.center,
                        calibration=self.calibration,
                        inplace=False))
        self.vectors_out.axes_manager.set_signal_dimension(0)

        self.last_method = "center_of_mass_method"
        return self.vectors_out
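A standalone sketch of the centre-of-mass convention used by _center_of_mass_hs, on a synthetic patch (illustrative only, not library code):

import numpy as np

def centre_of_mass_xy(z):
    # Intensity-weighted centroid returned as (x, y), i.e. (column, row),
    # matching the hyperspy convention used in the method above.
    z = z / np.sum(z)
    cx = np.sum(np.sum(z, axis=0) * np.arange(z.shape[1]))
    cy = np.sum(np.sum(z, axis=1) * np.arange(z.shape[0]))
    return cx, cy

patch = np.zeros((5, 5))
patch[2, 3] = 1.0                  # single bright pixel at row 2, column 3
print(centre_of_mass_xy(patch))    # -> (3.0, 2.0)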
Example #22
def test_vector_indexation_generator_cartesian_check():
    vectors = DiffractionVectors([[1], [2]])
    vector_library = DiffractionVectorLibrary()
    vector_indexation_generator = VectorIndexationGenerator(
        vectors, vector_library)
Example #23
 def test_wrong_navigation_dimensions(self):
     dp = ElectronDiffraction2D(np.zeros((2, 2, 8, 8)))
     vectors = DiffractionVectors(np.zeros((1, 2)))
     dp.axes_manager.set_signal_dimension(2)
     vectors.axes_manager.set_signal_dimension(0)
     SPR_generator = SubpixelrefinementGenerator(dp, vectors)
Example #24
 def create_Diffraction_vectors(self):
     v1 = np.array([[90 - 64, 30 - 64]])
     v2 = np.array([[90 - 64, 30 - 64], [100 - 64, 60 - 64]])
     vectors = DiffractionVectors(np.array([[v1, v1], [v2, v2]]))
     vectors.axes_manager.set_signal_dimension(0)
     return vectors
Example #25
    def find_peaks(self, method='skimage', *args, **kwargs):
        """Find the position of diffraction peaks.

        Function to locate the positive peaks in an image using various
        user-specified methods. Returns a structured array containing the peak
        positions.

        Parameters
        ----------
        method : str
            Select peak finding algorithm to implement. Available methods are:

            * 'max' - simple local maximum search
            * 'skimage' - call the peak finder implemented in scikit-image which
              uses a maximum filter
            * 'minmax' - finds peaks by comparing maximum filter results
              with minimum filter, calculates centers of mass
            * 'zaefferer' - based on gradient thresholding and refinement
              by local region of interest optimisation
            * 'stat' - statistical approach requiring no free params.
            * 'laplacian_of_gaussians' - a blob finder implemented in
              `scikit-image` which uses the laplacian of Gaussian matrices
              approach.
            * 'difference_of_gaussians' - a blob finder implemented in
              `scikit-image` which uses the difference of Gaussian matrices
              approach.
            * 'regionprops' - Uses regionprops to find islands of connected
               pixels representing a peak

        *args
            associated with above methods
        **kwargs
            associated with above methods.

        Returns
        -------
        peaks : DiffractionVectors
            A DiffractionVectors object with navigation dimensions identical to
            the original ElectronDiffraction object. Each signal is a BaseSignal
            object containing the diffraction vectors found at each navigation
            position, in calibrated units.
        """
        method_dict = {
            'skimage': peak_local_max,
            'zaefferer': find_peaks_zaefferer,
            'stat': find_peaks_stat,
            'laplacian_of_gaussians':  find_peaks_log,
            'difference_of_gaussians': find_peaks_dog,
        }
        if method in method_dict:
            method = method_dict[method]
        else:
            raise NotImplementedError("The method `{}` is not implemented. "
                                      "See documentation for available "
                                      "implementations.".format(method))

        peaks = self.map(method, *args, **kwargs, inplace=False, ragged=True)
        peaks.map(peaks_as_gvectors,
                  center=np.array(self.axes_manager.signal_shape)/2 - 0.5,
                  calibration=self.axes_manager.signal_axes[0].scale)
        peaks = DiffractionVectors(peaks)
        peaks.axes_manager.set_signal_dimension(0)
        if peaks.axes_manager.navigation_dimension != self.axes_manager.navigation_dimension:
            peaks = peaks.transpose(navigation_axes=2)
        if peaks.axes_manager.navigation_dimension != self.axes_manager.navigation_dimension:
            raise RuntimeWarning('You do not have the same size navigation axes \
            for your Diffraction pattern and your peaks')

        return peaks
Example #26
    def extract_intensities_summation_method(self,
                                             box_inner: int = 7,
                                             box_outer: int = 10,
                                             n_min: int = 5,
                                             n_max: int = 1000,
                                             snr_thresh: float = 3.0):
        """Integrate reflections using the summation method. Two boxes are defined,
        the inner box is used to define the integration area. The outer box is used
        to calculate the average signal-to-noise ratio (SNR).
        All pixels with a large enough SNR are considered to be signal. The largest region
        of connected signal pixels is summed to calculate the reflection intensity. The
        diffraction vectors are calculated as the center of mass of the signal pixels.

        Parameters
        ----------
        box_inner : int
            Defines the size of the inner box, which must be larger than the reflection.
        box_outer : int
            Defines the size of the outer box. The border between the inner and outer
            box is considered background and is used to calculate the SNR for each
            pixel: SNR = (I - <I>) / std(I_bkg).
        snr_thresh : float
            Minimum signal-to-noise for a pixel to be considered as `signal`.
        n_min : int
            If the number of SNR pixels in the inner box is < n_min, the reflection is discarded.
        n_max : int
            If the number of SNR pixels in the inner box is > n_max, the reflection is discarded.
        verbose : bool
            Print statistics for every reflection (for debugging)

        Returns
        -------
        vectors : :obj:`pyxem.signals.diffraction_vectors.DiffractionVectors`
            DiffractionVectors with optimized coordinates, where the attributes
            vectors.intensities -> `I`, vectors.sigma -> `sigma(I)`, and
            vectors.snr -> `I / sigma(I)`

        Notes
        -----
        Implementation based on Barty et al., J. Appl. Cryst. (2014), 47, 1118-1131
        and Leslie, Acta Cryst. (2006), D62, 48-57.
        """
        result = self.dp.map(_get_intensities_summation_method,
                             vectors=self.vector_pixels,
                             box_inner=box_inner,
                             box_outer=box_outer,
                             n_min=n_min,
                             n_max=n_max,
                             snr_thresh=snr_thresh,
                             inplace=False,
                             ragged=True)

        peaks = result.map(_take_ragged,
                           indices=[0, 1],
                           _axis=1,
                           inplace=False,
                           ragged=True)
        intensities = result.map(_take_ragged,
                                 indices=2,
                                 _axis=1,
                                 inplace=False,
                                 ragged=True)
        sigma = result.map(_take_ragged,
                           indices=3,
                           _axis=1,
                           inplace=False,
                           ragged=True)

        vectors = DiffractionVectors.from_peaks(peaks,
                                                calibration=self.calibration,
                                                center=self.center)
        vectors.intensities = intensities
        vectors.sigma = sigma
        vectors.snr = intensities / sigma

        return vectors
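Read together with Example #14 above, a hedged end-to-end sketch of this method; the pattern and peak positions are synthetic placeholders, and the pyxem classes are assumed imported as in the examples above:

import numpy as np
from scipy.ndimage import gaussian_filter

# Synthetic pattern with three blurred spots at known positions.
pixel_positions = np.array([[0, 0], [25, -25], [-25, 25]])
pattern = np.zeros((100, 100))
i, j = (pixel_positions + np.array(pattern.shape) // 2).T
pattern[i, j] = 1.0
pattern = gaussian_filter(pattern, 2)

ig = IntegrationGenerator(ElectronDiffraction2D(pattern), DiffractionVectors(pixel_positions))
refined = ig.extract_intensities_summation_method(box_inner=7, box_outer=10)
# refined.intensities, refined.sigma and refined.snr hold I, sigma(I) and I / sigma(I)
# for each reflection, and refined.data holds the centre-of-mass peak positions.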
Example #27
def diffraction_vectors_map(request):
    dvm = DiffractionVectors(request.param)
    dvm.axes_manager.set_signal_dimension(0)
    return dvm
Example #28
def unique_vectors(request):
    uv = DiffractionVectors(request.param)
    uv.axes_manager.set_signal_dimension(0)
    return uv
Example #29
def diffraction_vectors_map(request):
    dvm = DiffractionVectors(request.param)
    dvm.axes_manager.set_signal_dimension(0)
    dvm.axes_manager[0].name = "x"
    dvm.axes_manager[1].name = "y"
    return dvm
Example #30
def test_bad_vectors_DiffractionVectors():
    v = np.array([[1, -100]])
    dv = DiffractionVectors(v)
    dp = ElectronDiffraction(np.ones((20, 20)))
    sprg = SubpixelrefinementGenerator(dp, dv)