Example #1
def test_polar_rotate():
    y, x, radians = np.random.random(3)
    ((r, phi),) = ut.make_polar(np.array([(y, x)]))
    ((r_y, r_x),) = ut.make_cartesian(np.array([(r, phi + radians)]))
    r_y2, r_x2 = ut.rotate_rad(y, x, radians)
    assert np.allclose(r_y, r_y2)
    assert np.allclose(r_x, r_x2)
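
The test above checks ut.rotate_rad() against a polar round trip: convert to polar, add the angle, convert back. Under the (r, phi) convention demonstrated in Example #5 below (phi = arctan2(y, x), measured from the +x axis towards +y), an equivalent direct rotation could look like the following sketch; the actual ut.rotate_rad() implementation may differ.

import numpy as np

def rotate_rad_sketch(y, x, radians):
    # Rotate the point (y, x) by `radians` from the +x axis towards +y.
    # Equivalent to adding `radians` to the polar angle and converting back.
    r_y = y * np.cos(radians) + x * np.sin(radians)
    r_x = x * np.cos(radians) - y * np.sin(radians)
    return r_y, r_x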
Example #2
    def check(self, match):
        # reject matches with fewer points than the configured minimum
        if len(match) < self.min_match:
            return False
        # both lattice vectors a and b must pass the length filter
        papb = make_polar(np.array([match.a, match.b]))
        if len(size_filter(papb, self.min_delta, self.max_delta)) != 2:
            return False
        # and the angle between a and b must be at least min_angle
        return angle_check(papb[0:1], papb[1:2], self.min_angle)
Example #3
def cbed_frame(fy=128, fx=128, zero=None, a=None, b=None, indices=None, radius=4, all_equal=False):
    if zero is None:
        zero = (fy//2, fx//2)
    zero = np.array(zero)
    if a is None:
        a = (fy//8, 0)
    a = np.array(a)
    if b is None:
        b = make_cartesian(make_polar(a) - (0, np.pi/2))
    b = np.array(b)
    if indices is None:
        indices = np.mgrid[-10:11, -10:11]
    indices, peaks = frame_peaks(fy=fy, fx=fx, zero=zero, a=a, b=b, r=radius, indices=indices)

    data = np.zeros((1, fy, fx), dtype=np.float32)

    dists = np.linalg.norm(peaks - zero, axis=-1)
    max_dist = dists.max()

    for i, p in enumerate(peaks):
        data += m.circular(
            centerX=p[1],
            centerY=p[0],
            imageSizeX=fx,
            imageSizeY=fy,
            radius=radius,
            antialiased=True,
        ) * (1 if all_equal else max_dist - dists[i] + i)

    return (data, indices, peaks)
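
A usage sketch for the helper above, assuming cbed_frame() and its dependencies (frame_peaks and the mask module aliased as m) are importable in scope:

data, indices, peaks = cbed_frame(fy=128, fx=128, radius=4, all_equal=True)
# data: synthetic frame stack of shape (1, 128, 128) with a disk drawn at each peak
# indices, peaks: lattice indices and the corresponding (y, x) peak positions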
Example #4
def make_polar_vectors(coords, parameters):
    '''
    Calculate all unique pairwise connecting vectors between points in coords.

    The vectors are converted to polar coordinates and filtered with parameters["min_delta"]
    and parameters["max_delta"] to exclude unwanted higher-order vectors and random short vectors.
    '''
    # sort by x coordinate so that difference vectors always have a non-negative x component
    sort_indices = np.argsort(coords[:, 1])
    coords = coords[sort_indices]
    i, j = np.mgrid[0: len(coords), 0: len(coords)]
    selector = j > i
    deltas = coords[j[selector]] - coords[i[selector]]
    polar = make_polar(deltas)
    return size_filter(polar, parameters["min_delta"], parameters["max_delta"])
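
A usage sketch with hypothetical peak coordinates, assuming make_polar_vectors() as defined above and that size_filter() keeps vectors whose length lies between min_delta and max_delta:

import numpy as np

coords = np.array([(10., 10.), (10., 42.), (42., 10.)])  # hypothetical (y, x) peak positions
parameters = {"min_delta": 5, "max_delta": 50}
polar_vectors = make_polar_vectors(coords, parameters)
# each remaining row is (r, phi); the x-sort above guarantees a non-negative x component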
Example #5
def test_polar():
    data = np.array([
        [(0, 1), (0, 1)],
        [(1, 0), (2, 0)],
        [(-2, 0), (0, 1)],
    ])
    expected = np.array([
        [(1, 0), (1, 0)],
        [(1, np.pi/2), (2, np.pi/2)],
        [(2, -np.pi/2), (1, 0)],
    ])

    result = ut.make_polar(data)
    assert(data.shape == expected.shape)
    assert(result.shape == expected.shape)
    assert(np.allclose(expected, result))
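
A minimal sketch of conversions consistent with the expectations in this test, i.e. vectors stored as (y, x) and (r, phi) with phi = arctan2(y, x); the library's actual make_polar / make_cartesian may be implemented differently.

import numpy as np

def make_polar_sketch(cartesians):
    # (..., 2) array of (y, x) -> (..., 2) array of (r, phi)
    ys, xs = cartesians[..., 0], cartesians[..., 1]
    return np.stack((np.sqrt(ys**2 + xs**2), np.arctan2(ys, xs)), axis=-1)

def make_cartesian_sketch(polars):
    # (..., 2) array of (r, phi) -> (..., 2) array of (y, x)
    rs, phis = polars[..., 0], polars[..., 1]
    return np.stack((rs * np.sin(phis), rs * np.cos(phis)), axis=-1)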
Example #6
    def make_polar_vectors(self, coords):
        '''
        Calculate all unique pairwise connecting polar vectors between points in coords.

        The pairwise connecting vectors are converted to polar coordinates and
        filtered with parameters :py:attr:`~min_delta` and :py:attr:`~max_delta`
        to exclude unwanted higher-order vectors and random short vectors.

        All calculated vectors have a positive or zero x direction.
        '''
        # sort by x coordinate so that difference vectors always have a non-negative x component
        sort_indices = np.argsort(coords[:, 1])
        coords = coords[sort_indices]
        i, j = np.mgrid[0: len(coords), 0: len(coords)]
        selector = j > i
        deltas = coords[j[selector]] - coords[i[selector]]
        polar = make_polar(deltas)
        return size_filter(polar, self.min_delta, self.max_delta)
Example #7
def polar_map(centerX, centerY, imageSizeX, imageSizeY, stretchY=1., angle=0.):
    '''
    Return a map of radius and angle.

    The optional parameters stretchY and angle allow stretching and rotating the coordinate
    system into an elliptical form. This is useful for generating modified input data for
    functions that generate a template as a function of radius and angle.

    Parameters
    ----------

    centerX,centerY : float
        Center of the coordinate system in pixel coordinates
    imageSizeX,imageSizeY : int
        Size of the map to generate in px
    stretchY,angle : float, optional
        Stretch the radius elliptically by a factor of :code:`stretchY` in the direction
        given by :code:`angle` in radians. :code:`angle = 0` means the stretch is applied
        in the Y direction.

    Returns
    -------

    Tuple[numpy.ndarray, numpy.ndarray]
        Map of radius and angle of shape :code:`(imageSizeY, imageSizeX)`
    '''
    y, x = np.mgrid[0:imageSizeY, 0:imageSizeX]
    dy = y - centerY
    dx = x - centerX
    if stretchY != 1.0 or angle != 0.:
        (dy, dx) = (
            (dy*np.cos(angle) - dx*np.sin(angle)) / stretchY,
            dx*np.cos(angle) + dy*np.sin(angle),
        )

    dy = dy.flatten()
    dx = dx.flatten()
    cartesians = np.stack((dy, dx)).T
    polars = make_polar(cartesians)
    return (
        polars[:, 0].reshape((imageSizeY, imageSizeX)),
        polars[:, 1].reshape((imageSizeY, imageSizeX))
    )
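
A usage sketch, assuming polar_map() as defined above and its make_polar() dependency are importable in scope:

r_map, phi_map = polar_map(centerX=32.0, centerY=32.0, imageSizeX=64, imageSizeY=64)
annulus = (r_map >= 10) & (r_map <= 15)  # boolean mask of shape (64, 64) selecting a ring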
Example #8
    def full_match(
            self, centers, zero=None, cand=None,
            refineds=None, peak_values=None, peak_elevations=None):
        '''
        This function extracts a list of Match objects as well as two PointSelection objects
        for unmatched and weak points from the correlation result and the zero point.
        The zero point is included in each of the matches because it is shared between all grids.

        Parameters
        ----------
        centers : numpy.ndarray
            numpy.ndarray of shape (n, 2) with integer centers (y, x) of peaks. This would typically
            be extracted with :meth:`libertem_blobfinder.common.correlation.get_peaks`
        zero : numpy.ndarray
            Zero point as numpy array (y, x).
        cand : list or numpy.ndarray
            Optional list of candidate vectors (y, x) to use in a first matching round before
            guessing.
        refineds : numpy.ndarray
            numpy.ndarray of shape (n, 2) with float centers (y, x) of peaks (subpixel refinement)
        peak_values : numpy.ndarray
            numpy.ndarray of shape (n,) with float maxima of correlation map of peaks
        peak_elevations : numpy.ndarray
            numpy.ndarray of shape (n,) with float elevation of correlation map of peaks.
            See :meth:`libertem_blobfinder.base.correlation.peak_elevation` for details.

        Returns
        -------
        Tuple[List[libertem.analysis.gridmatching.Match, ...],\
        libertem.analysis.gridmatching.PointSelection,\
        libertem.analysis.gridmatching.PointSelection]
            matches: list of :class:`~libertem.analysis.gridmatching.Match` instances,

            unmatched: instance of :class:`~libertem.analysis.gridmatching.PointSelection`,

            weak: instance of :class:`~libertem.analysis.gridmatching.PointSelection`

        Example
        -------

        >>> peaks = np.array([
        ...     # First peak is zero if not specified otherwise
        ...     # Base lattice vectors (32, 0) and (0, 32)
        ...     (64, 64),
        ...     (32, 32), (32, 64), (32, 96),
        ...     (64, 32), (64, 96),
        ...     (96, 32), (96, 64), (96, 96),
        ... ])
        >>> matcher = FullMatcher()
        >>> (matches, unmatched, weak) = matcher.full_match(peaks)
        >>> m = matches[0]
        >>> assert np.allclose(m.zero, (64, 64))
        >>> assert np.allclose(m.a, (32, 0))
        >>> assert np.allclose(m.b, (0, 32))
        '''
        class ExitException(Exception):
            pass

        if zero is None:
            zero = centers[0]

        corr = grm.CorrelationResult(
            centers=centers,
            refineds=refineds,
            peak_values=peak_values,
            peak_elevations=peak_elevations,
        )

        matches = []

        filt = corr.peak_elevations >= self.min_weight

        working_set = grm.PointSelection(corr, selector=filt)

        zero_selector = np.array([
            np.allclose(corr.centers[i], zero)
            + np.allclose(corr.refineds[i], zero)
            for i in range(len(corr))
        ], dtype=bool)

        def listed(working_set, polar_cand):
            return polar_cand

        def guess(working_set, polar_cand):
            return self._candidates(working_set.refineds)

        if cand is not None:
            polar_cand = size_filter(
                make_polar(np.array(cand)),
                min_delta=self.min_delta,
                max_delta=self.max_delta
            )
            candidate_methods = [listed, guess]
        else:
            polar_cand = None
            candidate_methods = [guess]

        while True:
            new_selector = np.copy(working_set.selector)
            # First, find good candidate
            # Expensive operation, should be done on smaller sample
            # or sum frame result, at least for first passes to match majority
            # of peaks
            polar_candidate_vectors = candidate_methods[0](working_set, polar_cand)

            match = self._find_best_vector_match(
                point_selection=working_set, zero=zero,
                candidates=polar_candidate_vectors)
            if match is None:
                candidate_methods = candidate_methods[1:]
                if len(candidate_methods) == 0:
                    break
                else:
                    continue
            matches.append(match)
            # remove the ones that have been matched
            new_selector[match.selector] = False
            if np.count_nonzero(new_selector) >= self.min_match:
                # Add zero point that is shared by all patterns
                new_selector[zero_selector] = True
                working_set = working_set.derive(selector=new_selector)
            else:
                break
        if matches:
            new_selector[zero_selector] = False
        unmatched = working_set.derive(selector=new_selector)
        weak = grm.PointSelection(corr, selector=np.logical_not(filt))
        return (matches, unmatched, weak)
Example #9
def test_conversion(points):
    assert(np.allclose(points, ut.make_cartesian(ut.make_polar(points))))
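
The round-trip check above takes points from a fixture or parametrization not shown here. A self-contained sketch of the same property, using the (y, x) <-> (r, phi) convention inferred from Example #5 instead of the project's ut module:

import numpy as np

def test_conversion_roundtrip_sketch():
    rng = np.random.default_rng(0)
    pts = rng.uniform(-10, 10, size=(16, 2))        # random (y, x) points
    r = np.linalg.norm(pts, axis=-1)                # radius
    phi = np.arctan2(pts[:, 0], pts[:, 1])          # angle from +x towards +y
    back = np.stack((r * np.sin(phi), r * np.cos(phi)), axis=-1)
    assert np.allclose(pts, back)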
Example #10
    def full_match(self,
                   centers,
                   zero=None,
                   cand=None,
                   refineds=None,
                   peak_values=None,
                   peak_elevations=None):
        # FIXME check formatting when included in documentation
        '''
        This function extracts a list of Match objects as well as two PointSelection objects
        for unmatched and weak points from the correlation result and the zero point.
        The zero point is included in each of the matches because it is shared between all grids.

        Parameters
        ----------

        centers : numpy.ndarray
            numpy.ndarray of shape (n, 2) with integer centers (y, x) of peaks. This would typically
            be extracted with :meth:`~libertem.udf.blobfinder.get_peaks`
        zero : numpy.ndarray
            Zero point as numpy array (y, x).
        cand : list or numpy.ndarray
            Optional list of candidate vectors (y, x) to use in a first matching round before
            guessing.
        refineds : numpy.ndarray
            numpy.ndarray of shape (n, 2) with float centers (y, x) of peaks (subpixel refinement)
        peak_values : numpy.ndarray
            numpy.ndarray of shape (n,) with float maxima of correlation map of peaks
        peak_elevations : numpy.ndarray
            numpy.ndarray of shape (n,) with float elevation of correlation map of peaks.
            See :meth:`~libertem.udf.blobfinder.peak_elevation` for details.

        Returns
        -------

        Tuple[List[libertem.analysis.gridmatching.Match, ...],\
        libertem.analysis.gridmatching.PointSelection,\
        libertem.analysis.gridmatching.PointSelection]
            matches: list of :class:`~libertem.analysis.gridmatching.Match` instances,

            unmatched: instance of :class:`~libertem.analysis.gridmatching.PointSelection`,

            weak: instance of :class:`~libertem.analysis.gridmatching.PointSelection`

        Example
        -------

        >>> peaks = np.array([
        ...     # First peak is zero if not specified otherwise
        ...     (64, 64),
        ...     (32, 32), (32, 64), (32, 96),
        ...     (64, 32), (64, 96),
        ...     (96, 32), (96, 64), (96, 96),
        ... ])
        >>> matcher = FullMatcher()
        >>> (matches, unmatched, weak) = matcher.full_match(peaks)
        >>> print(matches[0])
        zero: [64. 64.]
        a: [32.  0.]
        b: [2.90077857e-15 3.20000000e+01]
        '''
        class ExitException(Exception):
            pass

        if zero is None:
            zero = centers[0]

        corr = grm.CorrelationResult(
            centers=centers,
            refineds=refineds,
            peak_values=peak_values,
            peak_elevations=peak_elevations,
        )

        matches = []

        filt = corr.peak_elevations >= self.min_weight

        working_set = grm.PointSelection(corr, selector=filt)

        zero_selector = np.array([
            np.allclose(corr.centers[i], zero)
            + np.allclose(corr.refineds[i], zero)
            for i in range(len(corr))
        ], dtype=bool)

        while True:
            # First, find good candidate
            # Expensive operation, should be done on smaller sample
            # or sum frame result, at least for first passes to match majority
            # of peaks
            if cand is not None:
                polar_candidate_vectors = make_polar(np.array(cand))
                cand = None
            else:
                polar_candidate_vectors = self._candidates(
                    working_set.refineds)

            try:
                match = self._find_best_vector_match(
                    point_selection=working_set,
                    zero=zero,
                    candidates=polar_candidate_vectors)
                match = match.weighted_optimize()
                match = self._match_all(point_selection=working_set,
                                        zero=match.zero,
                                        a=match.a,
                                        b=match.b)
                if len(match) == 0:
                    raise ExitException()
                match = match.weighted_optimize()
            except (NotFoundException, np.linalg.LinAlgError, ExitException):
                new_selector = np.copy(working_set.selector)
                new_selector[zero_selector] = False
                unmatched = working_set.derive(selector=new_selector)
                break
            matches.append(match)
            new_selector = np.copy(working_set.selector)
            # remove the ones that have been matched
            new_selector[match.selector] = False
            # Test if it spans a lattice
            if sum(new_selector) >= 3:
                # Add zero point that is shared by all patterns
                new_selector[zero_selector] = True
                working_set = working_set.derive(selector=new_selector)
            else:
                # print("doesn't span a lattice")
                new_selector[zero_selector] = False
                unmatched = working_set.derive(selector=new_selector)
                break
        weak = grm.PointSelection(corr, selector=np.logical_not(filt))
        return (matches, unmatched, weak)