Example #1
def test_vector_get_indexed_diffraction_vectors_warn():
    match_results = VectorMatchingResults(np.array([[1], [2]]))
    match_results.hkls = [0, 0, 1]
    vectors = DiffractionVectors(np.array([[1], [2]]))
    vectors.hkls = [0, 0, 0]
    with pytest.warns(Warning):
        match_results.get_indexed_diffraction_vectors(vectors)
    np.testing.assert_allclose(vectors.hkls, [0, 0, 0])
Example #2
def test_vector_get_indexed_diffraction_vectors(overwrite, result_hkl,
                                                current_hkl, expected_hkl):
    match_results = VectorMatchingResults(np.array([[1], [2]]))
    match_results.hkls = result_hkl
    vectors = DiffractionVectors(np.array([[1], [2]]))
    vectors.hkls = current_hkl
    match_results.get_indexed_diffraction_vectors(vectors, overwrite)
    np.testing.assert_allclose(vectors.hkls, expected_hkl)
def test_vector_indexation_generator_init():
    vectors = DiffractionVectors([[1], [2]])
    vectors.cartesian = [[1], [2]]
    vector_library = DiffractionVectorLibrary()
    vector_indexation_generator = VectorIndexationGenerator(
        vectors, vector_library)
    assert isinstance(vector_indexation_generator, VectorIndexationGenerator)
    assert vector_indexation_generator.vectors == vectors
    assert vector_indexation_generator.library == vector_library
Example #4
    def create_diffraction_vectors(self):
        v1 = np.array([[90 - 64, 30 - 64]])
        v2 = np.array([[90 - 64, 30 - 64], [100 - 64, 60 - 64]])
        self.diffraction_vectors = DiffractionVectors(
            np.array([[v1, v1], [v2, v2]], dtype=object),
            ragged=True,
        )
Example #5
    def test_out_of_range_vectors_DiffractionVectors(self):
        """Test that putting vectors that lie outside of the
        diffraction patterns raises a ValueError"""
        vectors = DiffractionVectors(np.array([[1, -100]]))
        dp = ElectronDiffraction2D(np.ones((20, 20)))

        with pytest.raises(ValueError):
            _ = SubpixelrefinementGenerator(dp, vectors)
def test_vector_indexation_generator_index_vectors(vector_match_peaks,
                                                   vector_library):
    # vectors not used directly
    vectors = DiffractionVectors(np.array(vector_match_peaks[:, :2]))
    vectors.cartesian = DiffractionVectors(np.array(vector_match_peaks))
    gen = VectorIndexationGenerator(vectors, vector_library)
    indexation = gen.index_vectors(mag_tol=0.1,
                                   angle_tol=6,
                                   index_error_tol=0.3,
                                   n_peaks_to_index=2,
                                   n_best=5)

    # Values are tested directly on the match_vector in the util tests
    assert isinstance(indexation.vectors, DiffractionVectors)

    # (n_best=5, one result for each)
    np.testing.assert_equal(indexation.data.shape, (5, ))

    # (n_best=1, 3 peaks with hkl)
    np.testing.assert_equal(indexation.hkls.shape, (1, 3, 3))

    refined1 = gen.refine_n_best_orientations(indexation, 1.0, 1.0, n_best=0)

    assert isinstance(refined1.vectors, DiffractionVectors)
    np.testing.assert_equal(refined1.data.shape, (5, ))

    refined2 = gen.refine_best_orientation(indexation, 1.0, 1.0)

    assert isinstance(refined2.vectors, DiffractionVectors)
    np.testing.assert_equal(refined2.data.shape, (1, ))
    assert isinstance(refined2.data[0], OrientationResult)

    assert refined2.data[0].phase_index == indexation.data[0].phase_index
    assert refined2.data[0].match_rate == indexation.data[0].match_rate

    # Must use a large tolerance here, because there are only 3 vectors
    np.testing.assert_almost_equal(
        np.diag(refined1.data[0].rotation_matrix),
        np.diag(indexation.data[0].rotation_matrix),
        1,
    )
    np.testing.assert_almost_equal(
        np.diag(refined2.data[0].rotation_matrix),
        np.diag(indexation.data[0].rotation_matrix),
        1,
    )
def test_vector_indexation_generator_cartesian_check():
    vectors = DiffractionVectors([[1], [2]])
    vector_library = DiffractionVectorLibrary()

    with pytest.raises(
        ValueError,
        match="Cartesian coordinates are required in order to index diffraction vectors",
    ):
        _ = VectorIndexationGenerator(vectors, vector_library)
    def test_wrong_navigation_dimensions(self):
        """Tests that navigation dimensions must be appropriate too."""
        dp = ElectronDiffraction2D(np.zeros((2, 2, 8, 8)))
        vectors = DiffractionVectors(np.zeros((1, 2)))
        dp.axes_manager.set_signal_dimension(2)
        vectors.axes_manager.set_signal_dimension(0)

        # Note - uses regex via re.search()
        with pytest.raises(
            ValueError,
            match=r"Vectors with shape .* must have the same navigation shape as .*",
        ):
            _ = SubpixelrefinementGenerator(dp, vectors)
def test_vdf_generator_from_map(diffraction_pattern):
    dvm = DiffractionVectors(
        np.array(
            [
                [np.array([[1, 1], [2, 2]]), np.array([[1, 1], [2, 2], [1, 2]])],
                [np.array([[1, 1], [2, 2]]), np.array([[1, 1], [2, 2]])],
            ],
            dtype=object,
        )
    )
    dvm.axes_manager.set_signal_dimension(0)

    vdfgen = VirtualImageGenerator(diffraction_pattern, dvm)
    assert isinstance(vdfgen, VirtualImageGenerator)
    def test_vdf_generator_init_with_vectors(self, diffraction_pattern):
        dvm = DiffractionVectors(
            np.array(
                [
                    [np.array([[1, 1], [2, 2]]), np.array([[1, 1], [2, 2], [1, 2]])],
                    [np.array([[1, 1], [2, 2]]), np.array([[1, 1], [2, 2]])],
                ],
                dtype=object,
            )
        )
        dvm.axes_manager.set_signal_dimension(0)

        vdfgen = VirtualDarkFieldGenerator(diffraction_pattern, dvm)
        assert isinstance(vdfgen.signal, ElectronDiffraction2D)
        assert isinstance(vdfgen.vectors, DiffractionVectors)
def test_integration_generator(radius, offset):
    pixel_positions = np.array([[0, 0], [15, -15], [-15, 15]])
    pattern = np.zeros((50, 50))
    center = np.array(pattern.shape) / 2
    i, j = (pixel_positions + center + offset).T.astype(int)
    pattern[i, j] = 1

    dv = DiffractionVectors(pixel_positions)
    dp = ElectronDiffraction2D(pattern)
    ig = IntegrationGenerator(dp, dv)
    assert isinstance(ig, IntegrationGenerator)

    inties = ig.extract_intensities(radius=radius)
    assert isinstance(inties, BaseSignal)

    assert np.allclose(inties.data, [1, 1, 1])
def test_integration_generator_summation_method():
    pixel_positions = np.array([[0, 0], [25, -25], [-25, 25]])
    pattern = np.zeros((100, 100))
    center = np.array(pattern.shape) / 2
    i, j = (pixel_positions + center).T.astype(int)
    pattern[i, j] = 1.0
    pattern = gaussian_filter(pattern, 2)

    dv = DiffractionVectors(pixel_positions)
    dp = ElectronDiffraction2D(pattern)
    ig = IntegrationGenerator(dp, dv)

    assert isinstance(ig, IntegrationGenerator)

    vectors = ig.extract_intensities_summation_method()

    assert np.allclose(vectors.data, pixel_positions, atol=0.05)
    assert np.allclose(vectors.intensities.data[0], 1.0, atol=0.05)
    assert np.allclose(vectors.sigma.data[0], 0.0, atol=0.05)
    assert isinstance(vectors, DiffractionVectors)
    def get_vdf_segments(
        self,
        min_distance=1,
        min_size=1,
        max_size=np.inf,
        max_number_of_grains=np.inf,
        marker_radius=1,
        threshold=False,
        exclude_border=False,
    ):
        """Separate segments from each of the virtual dark field (VDF) images
        using edge-detection by the Sobel transform and the watershed
        segmentation method implemented in scikit-image [1,2]. Obtain a
        VDFSegment, similar to VDFImage, but where each image is a
        segment of a VDF and the vectors correspond to each segment and
        are not necessarily unique.

        Parameters
        ----------
        min_distance : int
            Minimum distance (in pixels) between grains required for
            them to be considered as separate grains.
        min_size : float
            Grains with size (i.e. total number of pixels) below
            min_size are discarded.
        max_size : float
            Grains with size (i.e. total number of pixels) above
            max_size are discarded.
        max_number_of_grains : int
            Maximum number of grains included in the returned separated
            grains. If it is exceeded, those with highest peak
            intensities will be returned.
        marker_radius : float
            If 1 or larger, each marker for watershed is expanded to a disk
            of radius marker_radius. marker_radius should not exceed
            2*min_distance.
        threshold : bool
            If True, a mask is calculated by thresholding the VDF image
            by the Li threshold method in scikit-image. If False
            (default), the mask is the boolean VDF image.
        exclude_border : int or True, optional
            If non-zero integer, peaks within a distance of
            exclude_border from the border will be discarded. If True,
            peaks at or closer than min_distance from the border will
            be discarded.

        References
        ----------
        [1] http://scikit-image.org/docs/dev/auto_examples/segmentation/
            plot_watershed.html
        [2] http://scikit-image.org/docs/dev/auto_examples/xx_applications/
            plot_coins_segmentation.html#sphx-glr-auto-examples-xx-
            applications-plot-coins-segmentation-py

        Returns
        -------
        vdfsegs : VDFSegment
            VDFSegment object containing segments (i.e. grains) of
            single virtual dark field images with corresponding vectors.
        """
        vdfs = self.copy()
        vectors = self.vectors.data

        # TODO : Add aperture radius as an attribute of VDFImage?

        # Create an array of length equal to the number of vectors where each
        # element is a np.object with shape (n: number of segments for this
        # VDFImage, VDFImage size x, VDFImage size y).
        vdfsegs = np.array(
            vdfs.map(
                separate_watershed,
                show_progressbar=True,
                inplace=False,
                min_distance=min_distance,
                min_size=min_size,
                max_size=max_size,
                max_number_of_grains=max_number_of_grains,
                marker_radius=marker_radius,
                threshold=threshold,
                exclude_border=exclude_border,
            ),
            dtype=object,
        )

        segments, vectors_of_segments = [], []
        for i, vector in enumerate(vectors):
            segments = np.append(segments, vdfsegs[i])
            num_segs = np.shape(vdfsegs[i])[0]
            vectors_of_segments = np.append(
                vectors_of_segments, np.broadcast_to(vector, (num_segs, 2)))

        vectors_of_segments = vectors_of_segments.reshape((-1, 2))
        segments = segments.reshape((
            np.shape(vectors_of_segments)[0],
            vdfs.axes_manager.signal_shape[0],
            vdfs.axes_manager.signal_shape[1],
        ))
        # Calculate the total intensities of each segment
        segment_intensities = np.array([[np.sum(x, axis=(0, 1))]
                                        for x in segments],
                                       dtype="object")

        # if TraitError is raised, it is likely no segments were found
        segments = Signal2D(segments).transpose(navigation_axes=[0],
                                                signal_axes=[2, 1])
        # Create VDFSegment and transfer axes calibrations
        vdfsegs = VDFSegment(segments, DiffractionVectors(vectors_of_segments),
                             segment_intensities)
        vdfsegs.segments = transfer_signal_axes(vdfsegs.segments, vdfs)
        n = vdfsegs.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"
        vdfsegs.vectors_of_segments.axes_manager.set_signal_dimension(1)
        vdfsegs.vectors_of_segments = transfer_signal_axes(
            vdfsegs.vectors_of_segments, self.vectors)
        n = vdfsegs.vectors_of_segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfsegs
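The heavy lifting above happens inside pyxem's separate_watershed helper, which is not shown in this snippet. As a rough illustration of the technique the docstring names (Sobel edge detection followed by watershed segmentation), here is a minimal, self-contained sketch using scikit-image directly on a toy VDF-like image; the image, the peak-marker step and the 0.1 foreground threshold are invented for the example and are not pyxem's implementation.

import numpy as np
from skimage.filters import sobel
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

# Toy "VDF image": two Gaussian-like grains on a dark background.
yy, xx = np.mgrid[0:64, 0:64]
image = np.exp(-((xx - 20) ** 2 + (yy - 20) ** 2) / 30.0)
image += np.exp(-((xx - 45) ** 2 + (yy - 44) ** 2) / 30.0)

# Edge magnitude is used as the landscape that the watershed floods.
elevation = sobel(image)

# One marker per grain, placed at the local intensity maxima.
coords = peak_local_max(image, min_distance=5)
markers = np.zeros(image.shape, dtype=int)
markers[tuple(coords.T)] = np.arange(1, len(coords) + 1)

# Watershed restricted to the foreground mask: one label per grain.
labels = watershed(elevation, markers, mask=image > 0.1)
print(np.unique(labels))  # 0 (background) plus one label per grain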
Example #14
    def extract_intensities_summation_method(
        self,
        box_inner: int = 7,
        box_outer: int = 10,
        n_min: int = 5,
        n_max: int = 1000,
        snr_thresh: float = 3.0,
    ):
        """Integrate reflections using the summation method. Two boxes are defined,
        the inner box is used to define the integration area. The outer box is used
        to calculate the average signal-to-noise ratio (SNR).
        All pixels with a large enough SNR are considered to be signal. The largest region
        of connected signal pixels are summed to calculate the reflection intensity. The
        diffraction vectors are calculated as the center of mass of the signal pixels.

        Parameters
        ----------
        box_inner : int
            Defines the size of the inner box, which must be larger than the reflection.
        box_outer : int
            Defines the size of the outer box. The border between the inner and outer
            box is considered background and used to calculate the SNR for each
            pixel: SNR = (I - <I_bkg>) / std(I_bkg).
        snr_thresh : float
            Minimum signal-to-noise for a pixel to be considered as `signal`.
        n_min : int
            If the number of SNR pixels in the inner box is below n_min, the
            reflection is discarded.
        n_max : int
            If the number of SNR pixels in the inner box is above n_max, the
            reflection is discarded.

        Returns
        -------
        vectors : :obj:`pyxem.signals.diffraction_vectors.DiffractionVectors`
            DiffractionVectors with optimized coordinates, where the attributes
            vectors.intensities -> `I`, vectors.sigma -> `sigma(I)`, and
            vectors.snr -> `I / sigma(I)`

        Notes
        -----
        Implementation based on Barty et al., J. Appl. Cryst. (2014), 47, 1118-1131
        and Leslie, Acta Cryst. (2006), D62, 48-57.
        """
        _logger.warning(
            "This function might not work properly at the moment, check that the "
            "returned results looks reasonable."
        )
        result = self.dp.map(
            _get_intensities_summation_method,
            vectors=self.vector_pixels,
            box_inner=box_inner,
            box_outer=box_outer,
            n_min=n_min,
            n_max=n_max,
            snr_thresh=snr_thresh,
            inplace=False,
            ragged=True,
        )

        peaks = result.map(
            _take_ragged, indices=[0, 1], _axis=1, inplace=False, ragged=True
        )
        intensities = result.map(
            _take_ragged, indices=2, _axis=1, inplace=False, ragged=True
        )
        sigma = result.map(_take_ragged, indices=3, _axis=1, inplace=False, ragged=True)

        vectors = DiffractionVectors.from_peaks(
            peaks, calibration=self.calibration, center=self.center
        )
        vectors.intensities = intensities
        vectors.sigma = sigma
        vectors.snr = intensities / sigma

        return vectors
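For orientation, the following is a self-contained sketch of the inner-box / outer-box SNR logic the docstring describes, not pyxem's _get_intensities_summation_method: the toy pattern, the assumed peak position and the threshold of 3.0 are made up for the example, and only numpy and scipy.ndimage are used.

import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
pattern = rng.normal(10.0, 1.0, size=(50, 50))   # noisy background
pattern[24:27, 24:27] += 100.0                   # a single reflection

cy, cx = 25, 25                                  # approximate peak position
box_inner, box_outer = 7, 10
outer = pattern[cy - box_outer:cy + box_outer, cx - box_outer:cx + box_outer]
inner = pattern[cy - box_inner:cy + box_inner, cx - box_inner:cx + box_inner]

# Background statistics come from the border between the two boxes.
bkg_mask = np.ones_like(outer, dtype=bool)
pad = box_outer - box_inner
bkg_mask[pad:-pad, pad:-pad] = False
bkg_mean, bkg_std = outer[bkg_mask].mean(), outer[bkg_mask].std()

# Signal pixels have SNR above the threshold; keep the largest connected region.
snr = (inner - bkg_mean) / bkg_std
labels, _ = ndi.label(snr > 3.0)
largest = np.argmax(np.bincount(labels.ravel())[1:]) + 1
signal = labels == largest

intensity = (inner[signal] - bkg_mean).sum()     # background-subtracted sum
com = ndi.center_of_mass(inner - bkg_mean, labels, largest)  # refined peak position (inner-box frame)
print(intensity, com)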
Example #15
def diffraction_vectors_map(request):
    dvm = DiffractionVectors(request.param)
    dvm.axes_manager.set_signal_dimension(0)
    dvm.axes_manager[0].name = "x"
    dvm.axes_manager[1].name = "y"
    return dvm
Example #16
    def create_Diffraction_vectors(self):
        v1 = np.array([[90 - 64, 30 - 64]])
        v2 = np.array([[90 - 64, 30 - 64], [100 - 64, 60 - 64]])
        vectors = DiffractionVectors(np.array([[v1, v1], [v2, v2]], dtype=object))
        vectors.axes_manager.set_signal_dimension(0)
        return vectors
Example #17
def unique_vectors(request):
    uv = DiffractionVectors(request.param)
    uv.axes_manager.set_signal_dimension(0)
    return uv
Example #18
    def correlate_vdf_segments(self,
                               corr_threshold=0.7,
                               vector_threshold=4,
                               segment_threshold=3):
        """Iterates through VDF segments and sums those that are
        associated with the same segment. Summation will be done for
        those segments that have a normalised cross correlation above
        corr_threshold. The vectors of each segment sum will be updated
        accordingly, so that the vectors of each resulting segment sum
        are all the vectors of the original individual segments. Each
        vector is assigned an intensity that is the integrated intensity
        of the segment it originates from.

        Parameters
        ----------
        corr_threshold : float
            Segments will be summed if they have a normalized cross-
            correlation above corr_threshold. Must be between 0 and 1.
        vector_threshold : int, optional
            Correlated segments having a number of vectors less than
            vector_threshold will be discarded.
        segment_threshold : int, optional
            Correlated segment intensities that lie in a region where
            fewer than segment_threshold segments have been found are
            set to 0, i.e. the resulting segment will only have
            intensities above 0 where at least segment_threshold
            segments have intensities above 0.

        Returns
        -------
        vdfseg : VDFSegment
            The VDFSegment instance updated according to the image
            correlation results.
        """
        vectors = self.vectors_of_segments.data

        if segment_threshold > vector_threshold:
            raise ValueError("segment_threshold must be smaller than or "
                             "equal to vector_threshold.")

        segments = self.segments.data.copy()
        num_vectors = np.shape(vectors)[0]
        gvectors = np.array(np.empty(num_vectors, dtype=object))
        vector_indices = np.array(np.empty(num_vectors, dtype=object))

        for i in np.arange(num_vectors):
            gvectors[i] = np.array(vectors[i].copy())
            vector_indices[i] = np.array([i], dtype=int)

        correlated_segments = np.zeros_like(segments[:1])
        correlated_vectors = np.array([0.0], dtype=object)
        correlated_vectors[0] = np.array(np.zeros_like(vectors[:1]))
        correlated_vector_indices = np.array([0], dtype=object)
        correlated_vector_indices[0] = np.array([0])
        i = 0
        pbar = tqdm(total=np.shape(segments)[0])
        while np.shape(segments)[0] > i:
            # For each segment, calculate the normalized cross-correlation to
            # all other segments, and define add_indices for those with a value
            # above corr_threshold.
            corr_list = list(
                map(lambda x: norm_cross_corr(x, template=segments[i]),
                    segments))

            corr_add = list(map(lambda x: x > corr_threshold, corr_list))
            add_indices = np.where(corr_add)
            # If there are more add_indices than vector_threshold,
            # sum segments and add their vectors. Otherwise, discard segment.
            if (np.shape(add_indices[0])[0] >= vector_threshold
                    and np.shape(add_indices[0])[0] > 1):
                new_segment = np.array([np.sum(segments[add_indices], axis=0)])
                if segment_threshold > 1:
                    segment_check = np.zeros_like(segments[add_indices],
                                                  dtype=int)
                    segment_check[np.where(segments[add_indices])] = 1
                    segment_check = np.sum(segment_check, axis=0, dtype=int)
                    segment_mask = np.zeros_like(segments[0], dtype=bool)
                    segment_mask[np.where(
                        segment_check >= segment_threshold)] = 1
                    new_segment = new_segment * segment_mask
                correlated_segments = np.append(correlated_segments,
                                                new_segment,
                                                axis=0)
                add_indices = add_indices[0]
                new_vectors = np.array([0], dtype=object)
                new_vectors[0] = np.concatenate(gvectors[add_indices],
                                                axis=0).reshape(-1, 2)
                correlated_vectors = np.append(correlated_vectors,
                                               new_vectors,
                                               axis=0)
                new_indices = np.array([0], dtype=object)
                new_indices[0] = np.concatenate(vector_indices[add_indices],
                                                axis=0).reshape(-1, 1)
                correlated_vector_indices = np.append(
                    correlated_vector_indices, new_indices, axis=0)
            elif np.shape(add_indices[0])[0] >= vector_threshold:
                add_indices = add_indices[0]
                correlated_segments = np.append(correlated_segments,
                                                segments[add_indices],
                                                axis=0)
                correlated_vectors = np.append(correlated_vectors,
                                               gvectors[add_indices],
                                               axis=0)
                correlated_vector_indices = np.append(
                    correlated_vector_indices,
                    vector_indices[add_indices],
                    axis=0)
            else:
                add_indices = i
            segments = np.delete(segments, add_indices, axis=0)
            gvectors = np.delete(gvectors, add_indices, axis=0)
            vector_indices = np.delete(vector_indices, add_indices, axis=0)

        pbar.close()
        correlated_segments = np.delete(correlated_segments, 0, axis=0)
        correlated_vectors = np.delete(correlated_vectors, 0, axis=0)
        correlated_vector_indices = np.delete(correlated_vector_indices,
                                              0,
                                              axis=0)
        correlated_vector_intensities = np.array(np.empty(
            len(correlated_vectors)),
                                                 dtype=object)

        # Sum the intensities in the original segments and assign those to the
        # correct vectors by referring to vector_indices.
        # If segment_mask has been used, use the segments as masks too.
        if segment_threshold > 1:
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                segment_mask = np.zeros_like(segment_mask)
                segment_mask[np.where(correlated_segments[i])] = 1
                segment_intensities = np.sum(self.segments.data * segment_mask,
                                             axis=(1, 2))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])
        else:
            segment_intensities = np.sum(self.segments.data, axis=(1, 2))
            for i in range(len(correlated_vectors)):
                correlated_vector_intensities[i] = np.zeros(
                    len(correlated_vector_indices[i]))
                for n, index in zip(
                        range(len(correlated_vector_indices[i])),
                        correlated_vector_indices[i],
                ):
                    correlated_vector_intensities[i][n] = np.sum(
                        segment_intensities[index])

        vdfseg = VDFSegment(
            Signal2D(correlated_segments),
            DiffractionVectors(correlated_vectors),
            correlated_vector_intensities,
        )

        # Transfer axes properties of segments
        vdfseg.segments = transfer_signal_axes(vdfseg.segments, self.segments)
        n = vdfseg.segments.axes_manager.navigation_axes[0]
        n.name = "n"
        n.units = "number"

        return vdfseg
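The merging above hinges on the norm_cross_corr helper, which is not shown in this snippet. As a hedged guess at what such a score computes (a zero-mean normalised cross-correlation; pyxem's helper may differ in detail), the toy comparison below shows why two images of the same grain clear corr_threshold=0.7 while images of different grains do not.

import numpy as np

def normalised_cross_correlation(a, b):
    """Zero-mean normalised cross-correlation of two equally shaped images."""
    a = a - a.mean()
    b = b - b.mean()
    denom = np.sqrt((a ** 2).sum() * (b ** 2).sum())
    return (a * b).sum() / denom if denom else 0.0

rng = np.random.default_rng(1)
seg1 = np.zeros((10, 10))
seg1[2:6, 2:6] = 1.0                                 # a grain
seg2 = 0.5 * seg1 + rng.normal(0, 0.05, seg1.shape)  # same grain, dimmer and noisy
seg3 = np.zeros((10, 10))
seg3[6:9, 6:9] = 1.0                                 # a different grain

corr_threshold = 0.7
print(normalised_cross_correlation(seg1, seg2) > corr_threshold)  # True: segments would be summed
print(normalised_cross_correlation(seg1, seg3) > corr_threshold)  # False: segments stay separate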
Example #19
def diffraction_vectors(request):
    dvec = DiffractionVectors(request.param)
    dvec.axes_manager.set_signal_dimension(1)
    return dvec