Example #1
def test_run_refine_fastmatch_zeroshift(lt_ctx):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    # Randomly keep about 90 % of the index positions (the mask is True with p=0.9)
    drop = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[drop]

    radius = 10
    # Exactly between peaks, worst case
    shift = (a + b) / 2

    data_0, indices_0, peaks_0 = cbed_frame(*shape, zero, a, b, indices,
                                            radius)
    data_1, indices_1, peaks_1 = cbed_frame(*shape, zero + shift, a, b,
                                            indices, radius)

    data = np.concatenate((data_0, data_1), axis=0)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)
    matcher = grm.Matcher()

    match_patterns = [
        # Least reliable pattern
        common.patterns.Circular(radius=radius),
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        # Per-frame shift of the zero point: frame 0 unshifted, frame 1 shifted by `shift`
        zero_shift = np.array([(0., 0.), shift]).astype(np.float32)
        (res, real_indices) = udf.refinement.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern,
            zero_shift=UDF.aux_data(zero_shift, kind='nav', extra_shape=(2, )))
        print(peaks_0 - grm.calc_coords(res['zero'].data[0], res['a'].data[0],
                                        res['b'].data[0], indices_0))

        print(peaks_1 - grm.calc_coords(res['zero'].data[1], res['a'].data[1],
                                        res['b'].data[1], indices_1))

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['zero'].data[1], zero + shift, atol=0.5)
        assert np.allclose(res['a'].data, a, atol=0.2)
        assert np.allclose(res['b'].data, b, atol=0.2)
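
The per-frame zero_shift above is what this test really exercises: one (y, x) offset of the zero-order peak per frame, passed to run_refine as AUX data of kind 'nav'. A minimal sketch of how such a buffer could be built for a larger, hypothetical scan, assuming the same float32/(2,) layout as in the test:

import numpy as np

# Hypothetical navigation shape; one (y, x) shift per frame, to be wrapped
# with UDF.aux_data(..., kind='nav', extra_shape=(2,)) as in the test above.
scan_y, scan_x = 4, 5
zero_shift = np.zeros((scan_y * scan_x, 2), dtype=np.float32)
zero_shift[7] = (1.5, -0.5)  # assume frame 7 drifted by 1.5 px in y, -0.5 px in x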
Example #2
File: utils.py Project: saisunku/LiberTEM
def _fullgrid(zero, a, b, index, skip_zero=False):
    i, j = np.mgrid[-index:index, -index:index]
    indices = np.concatenate(np.array((i, j)).T)
    if skip_zero:
        # Keep only indices where at least one component is non-zero
        select = np.not_equal(indices[:, 0], 0) + np.not_equal(indices[:, 1], 0)
        indices = indices[select]
    return calc_coords(zero, a, b, indices)
Example #3
def test_fullmatch_three_residual(zero, a, b):
    if fm is None:
        pytest.skip("Failed to load optional dependency %s." % missing)
    aa = np.array([1.27, 1.2])
    bb = np.array([1.27, -1.2])

    grid_1 = _fullgrid(zero, a, b, 7)
    grid_2 = _fullgrid(zero, aa, bb, 4, skip_zero=True)

    random = np.array([
        (0.3, 0.5),
        (-0.3, 12.5),
        (-0.3, -17.5),
    ])

    grid = np.vstack((grid_1, grid_2, random))

    parameters = {
        'min_delta': 0.3,
        'max_delta': 3,
    }

    (matches, unmatched, weak) = fm.full_match(grid,
                                               zero=zero,
                                               parameters=parameters)

    assert (len(matches) == 2)

    assert (len(unmatched) == len(random))
    assert (len(weak) == 0)

    match1 = matches[0]

    assert (np.allclose(zero, match1.zero))
    assert (np.allclose(a, match1.a) or np.allclose(b, match1.a)
            or np.allclose(-a, match1.a) or np.allclose(-b, match1.a))
    assert (np.allclose(a, match1.b) or np.allclose(b, match1.b)
            or np.allclose(-a, match1.b) or np.allclose(-b, match1.b))
    assert (len(match1) == len(grid_1))
    assert (np.allclose(match1.calculated_refineds, grid_1))

    match2 = matches[1]

    assert (np.allclose(zero, match2.zero))
    assert (np.allclose(aa, match2.a) or np.allclose(bb, match2.a)
            or np.allclose(-aa, match2.a) or np.allclose(-bb, match2.a))
    assert (np.allclose(aa, match2.b) or np.allclose(bb, match2.b)
            or np.allclose(-aa, match2.b) or np.allclose(-bb, match2.b))
    # We always match the zero point for each lattice
    assert (len(match2) == len(grid_2) + 1)
    # We filter out the zero point, which the matching routine adds in each matching cycle
    skip_zero = np.array([
        any(match2.indices[i] != np.array((0, 0))) for i in range(len(match2))
    ], dtype=bool)
    # We calculate by hand because the built-in method can't skip the zero point
    assert (np.allclose(
        grm.calc_coords(match2.zero, match2.a, match2.b,
                        match2.indices[skip_zero]), grid_2))
Example #4
def test_run_refine_fastmatch(lt_ctx, progress):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    # Randomly keep about 90 % of the index positions (the mask is True with p=0.9)
    drop = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[drop]

    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)
    matcher = grm.Matcher()

    template = m.radial_gradient(centerX=radius + 1,
                                 centerY=radius + 1,
                                 imageSizeX=2 * radius + 2,
                                 imageSizeY=2 * radius + 2,
                                 radius=radius)

    match_patterns = [
        common.patterns.RadialGradient(radius=radius),
        common.patterns.Circular(radius=radius),
        common.patterns.BackgroundSubtraction(radius=radius),
        common.patterns.RadialGradientBackgroundSubtraction(radius=radius),
        common.patterns.UserTemplate(template=template)
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        (res, real_indices) = udf.refinement.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern,
            progress=progress)
        print(peaks - grm.calc_coords(res['zero'].data[0], res['a'].data[0],
                                      res['b'].data[0], indices))

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['a'].data[0], a, atol=0.2)
        assert np.allclose(res['b'].data[0], b, atol=0.2)
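
The UserTemplate pattern above accepts an arbitrary 2D array; here it is fed a radial gradient of the same size as the diffraction disks. A rough standalone sketch of such a template (an illustration only, not the m.radial_gradient implementation itself):

import numpy as np

radius = 10
# Distance from the centre on a (2*radius + 2) x (2*radius + 2) grid
y, x = np.ogrid[-radius - 1:radius + 1, -radius - 1:radius + 1]
r = np.sqrt(y**2 + x**2)
# Rise linearly towards the rim of the disk, zero outside of it
template = np.where(r <= radius, r / radius, 0.)
print(template.shape)  # (22, 22), matching imageSizeY x imageSizeX above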
Example #5
def visualize_frame(ctx,
                    ds,
                    result,
                    indices,
                    r,
                    y,
                    x,
                    axes,
                    colors=None,
                    stretch=10):
    '''
    Visualize the refinement of a specific frame in matplotlib axes
    '''
    # Get the frame from the dataset
    get_sample_frame = ctx.create_pick_analysis(dataset=ds, y=y, x=x)
    sample_frame = ctx.run(get_sample_frame)

    if y is None:
        select = (x, )
    else:
        select = (y, x)

    d = sample_frame[0].raw_data.astype(np.float32)

    pcm = axes.imshow(np.log(d - np.min(d) + 1))

    refined = result['refineds'].data[select]
    elevations = result['peak_elevations'].data[select]
    selector = result['selector'].data[select]

    max_elevation = np.max(elevations)

    # Calculate the best fit positions to compare with the
    # individual peak positions.
    # A difference between best fit and individual peaks highlights outliers.
    calculated = grm.calc_coords(zero=result['zero'].data[select],
                                 a=result['a'].data[select],
                                 b=result['b'].data[select],
                                 indices=indices)

    paint_markers(
        axes=axes,
        r=r,
        refined=refined,
        normalized_elevations=elevations / max_elevation,
        calculated=calculated,
        selector=selector,
        zero=result['zero'].data[select],
        a=result['a'].data[select],
        b=result['b'].data[select],
        colors=colors,
        stretch=stretch,
    )
    return pcm
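
The imshow call above shifts the frame so its minimum maps to 1 before applying the log, which keeps the log finite for any value range, including negative raw data. The same normalization in isolation:

import numpy as np

d = np.array([[-3., 0.], [10., 100.]], dtype=np.float32)
# Shift so the minimum becomes 1, then take the log, as in the imshow call above
shown = np.log(d - np.min(d) + 1)
print(shown.min())  # 0.0 for the darkest pixel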
Example #6
def test_run_refine_fastmatch(lt_ctx):
    shape = np.array([256, 256])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-3:4, -3:4]
    indices = np.concatenate(indices.T)

    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)
    matcher = grm.Matcher()

    template = m.radial_gradient(centerX=radius + 1,
                                 centerY=radius + 1,
                                 imageSizeX=2 * radius + 2,
                                 imageSizeY=2 * radius + 2,
                                 radius=radius)

    match_patterns = [
        blobfinder.RadialGradient(radius=radius),
        blobfinder.BackgroundSubtraction(radius=radius),
        blobfinder.RadialGradientBackgroundSubtraction(radius=radius),
        blobfinder.UserTemplate(template=template)
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        (res, real_indices) = blobfinder.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern)
        print(peaks - grm.calc_coords(res['zero'].data[0], res['a'].data[0],
                                      res['b'].data[0], indices))

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['a'].data[0], a, atol=0.2)
        assert np.allclose(res['b'].data[0], b, atol=0.2)
Example #7
def visualize_frame(ctx,
                    ds,
                    result,
                    indices,
                    r,
                    y,
                    x,
                    axes,
                    colors=None,
                    stretch=10):
    # Get the frame from the dataset
    get_sample_frame = ctx.create_pick_analysis(dataset=ds, y=y, x=x)
    sample_frame = ctx.run(get_sample_frame)

    d = sample_frame[0].raw_data

    axes.imshow(np.log(d - np.min(d) + 1))

    refined = result['refineds'].data[y, x]
    elevations = result['peak_elevations'].data[y, x]
    selector = result['selector'].data[y, x]

    max_elevation = np.max(elevations)

    # Calculate the best fit positions to compare with the
    # individual peak positions.
    # A difference between best fit and individual peaks highlights outliers.
    calculated = grm.calc_coords(zero=result['zero'].data[y, x],
                                 a=result['a'].data[y, x],
                                 b=result['b'].data[y, x],
                                 indices=indices)

    paint_markers(
        axes=axes,
        r=r,
        refined=refined,
        normalized_elevations=elevations / max_elevation,
        calculated=calculated,
        selector=selector,
        zero=result['zero'].data[y, x],
        a=result['a'].data[y, x],
        b=result['b'].data[y, x],
        colors=colors,
        stretch=stretch,
    )
Example #8
def test_run_refine_sparse(lt_ctx):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                            num_partitions=1, sig_dims=2)

    matcher = grm.Matcher()
    match_pattern = common.patterns.RadialGradient(radius=radius)

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    (res, real_indices) = udf.refinement.run_refine(
        ctx=lt_ctx,
        dataset=dataset,
        zero=zero + np.random.uniform(-0.5, 0.5, size=2),
        a=a + np.random.uniform(-0.5, 0.5, size=2),
        b=b + np.random.uniform(-0.5, 0.5, size=2),
        matcher=matcher,
        match_pattern=match_pattern,
        correlation='sparse',
        steps=3
    )

    print(peaks - grm.calc_coords(
        res['zero'].data[0],
        res['a'].data[0],
        res['b'].data[0],
        indices)
    )

    assert np.allclose(res['zero'].data[0], zero, atol=0.5)
    assert np.allclose(res['a'].data[0], a, atol=0.2)
    assert np.allclose(res['b'].data[0], b, atol=0.2)
Example #9
def test_calc_coords(zero, a, b, points, indices):
    result = grm.calc_coords(zero, a, b, indices)
    assert (np.allclose(result, points))
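
The test above checks grm.calc_coords against precomputed points. As a reference for what is being computed, here is a minimal numpy sketch of the same linear-lattice formula (an illustration, not LiberTEM's own implementation):

import numpy as np

def calc_coords_sketch(zero, a, b, indices):
    # Each (h, k) index pair maps to zero + h*a + k*b
    return np.asarray(zero) + np.dot(np.asarray(indices), np.array((a, b)))

# index (1, 2) with zero=(64, 64), a=(27, 0), b=(0, 29) -> (91, 122)
print(calc_coords_sketch((64., 64.), (27., 0.), (0., 29.), [(1, 2)]))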
Example #10
def run_refine(ctx,
               dataset,
               zero,
               a,
               b,
               corr_params,
               match_params,
               indices=None):
    '''
    Refine the given lattice for each frame by calculating approximate peak positions and
    refining them using blob correlation and gridmatching.fastmatch().

    indices:
        Indices to refine. This is trimmed down to positions within the frame.
        As a convenience, for the indices parameter this function accepts both shape
        (n, 2) and (2, n, m) so that numpy.mgrid[h:k, i:j] works directly to specify indices.
        This saves boilerplate code when using this function. Default: numpy.mgrid[-10:10, -10:10].
    match_params['affine']: If True, use affine transformation matching. This is very fast and
        robust against a distorted field of view, but doesn't exclude outliers.


    returns:
        (result, used_indices) where result is
        {
            'centers': BufferWrapper(
                kind="nav", extra_shape=(num_disks, 2), dtype="u2"
            ),
            'refineds': BufferWrapper(
                kind="nav", extra_shape=(num_disks, 2), dtype="float32"
            ),
            'peak_values': BufferWrapper(
                kind="nav", extra_shape=(num_disks,), dtype="float32"
            ),
            'peak_elevations': BufferWrapper(
                kind="nav", extra_shape=(num_disks,), dtype="float32"
            ),
            'zero': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
            'a': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
            'b': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
            'selector': BufferWrapper(
                kind="nav", extra_shape=(num_disks,), dtype="bool"
            ),
        }
        and used_indices are the indices that were within the frame.
    '''
    if indices is None:
        indices = np.mgrid[-10:10, -10:10]
    s = indices.shape
    # Output of mgrid
    if (len(s) == 3) and (s[0] == 2):
        indices = np.concatenate(indices.T)
    # List of (i, j) pairs
    elif (len(s) == 2) and (s[1] == 2):
        pass
    else:
        raise ValueError(
            "Shape of indices is %s, expected (n, 2) or (2, n, m)" %
            str(indices.shape))

    (fy, fx) = tuple(dataset.shape.sig)

    peaks = grm.calc_coords(zero, a, b, indices).astype('int')

    selector = grm.within_frame(peaks, corr_params['radius'], fy, fx)

    peaks = peaks[selector]
    indices = indices[selector]

    result = ctx.run_udf(
        dataset=dataset,
        fn=functools.partial(
            refine,
            start_zero=zero,
            start_a=a,
            start_b=b,
            match_params=match_params,
            indices=indices,
        ),
        init=functools.partial(init_pass_2,
                               peaks=peaks,
                               parameters=corr_params),
        make_buffers=functools.partial(
            get_result_buffers_refine,
            num_disks=len(peaks),
        ),
    )
    return (result, indices)
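
The docstring above mentions that indices may come straight from numpy.mgrid with shape (2, n, m); the transpose-and-concatenate step then flattens them into (n*m, 2) index pairs. In isolation:

import numpy as np

indices = np.mgrid[-2:3, -2:3]
print(indices.shape)              # (2, 5, 5), raw mgrid output
flat = np.concatenate(indices.T)  # stack the transposed slices into (i, j) pairs
print(flat.shape)                 # (25, 2)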
Example #11
def run_refine(ctx,
               dataset,
               zero,
               a,
               b,
               parameters,
               indices=None,
               bounds=None):
    '''
    Refine the given lattice for each frame by optimizing the correlation
    with full rendered frames.

    Full frame matching inspired by Christoph Mahr, Knut Müller-Caspary
    and the Bremen group in general

    indices:
        Indices to refine. This is trimmed down to positions within the frame.
        As a convenience, for the indices parameter this function accepts both shape
        (n, 2) and (2, n, m) so that numpy.mgrid[h:k, i:j] works directly to specify indices.
        This saves boilerplate code when using this function. Default: numpy.mgrid[-10:10, -10:10].

    returns:
        (result, used_indices) where result is
        {
            'intensity': BufferWrapper(
                kind="nav", dtype="float32"
            ),
            'zero': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
            'a': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
            'b': BufferWrapper(
                kind="nav", extra_shape=(2,), dtype="float32"
            ),
        }
        and used_indices are the indices that were within the frame.
    '''
    if indices is None:
        indices = np.mgrid[-10:10, -10:10]
    s = indices.shape
    # Output of mgrid
    if (len(s) == 3) and (s[0] == 2):
        indices = np.concatenate(indices.T)
    # List of (i, j) pairs
    elif (len(s) == 2) and (s[1] == 2):
        pass
    else:
        raise ValueError(
            "Shape of indices is %s, expected (n, 2) or (2, n, m)" %
            str(indices.shape))

    (fy, fx) = tuple(dataset.shape.sig)

    peaks = grm.calc_coords(zero, a, b, indices).astype('int')

    selector = grm.within_frame(peaks, parameters['radius'], fy, fx)

    indices = indices[selector]

    result = ctx.run_udf(
        dataset=dataset,
        fn=functools.partial(refine,
                             start_zero=zero,
                             start_a=a,
                             start_b=b,
                             indices=indices,
                             parameters=parameters,
                             bounds=bounds),
        make_buffers=get_result_buffers_refine,
    )
    return (result, indices)
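
Both run_refine variants trim the requested indices to peaks that fit inside the frame via grm.within_frame. A rough sketch of that filtering criterion (an illustration of the behaviour relied on above, not the gridmatching implementation itself):

import numpy as np

def within_frame_sketch(peaks, radius, fy, fx):
    # Keep peaks whose disk of the given radius lies fully inside a (fy, fx) frame
    y, x = peaks[:, 0], peaks[:, 1]
    return (y >= radius) & (y < fy - radius) & (x >= radius) & (x < fx - radius)

peaks = np.array([(5, 5), (64, 64), (125, 64)])
print(within_frame_sketch(peaks, radius=10, fy=128, fx=128))  # [False  True False]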