Example #1
def test_multi_mask_force_dtype(lt_ctx):
    force_dtype = np.dtype(np.int32)
    data = _mk_random(size=(16, 16, 16, 16), dtype="int16")
    masks = _mk_random(size=(2, 16, 16), dtype="bool")
    expected = _naive_mask_apply(masks.astype(force_dtype), data.astype(force_dtype))

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset, factories=lambda: masks, dtype=force_dtype
    )
    results = lt_ctx.run(analysis)

    assert results.mask_0.raw_data.dtype.kind == force_dtype.kind
    assert results.mask_0.raw_data.dtype == force_dtype

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
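Several of these examples compare against _naive_mask_apply, a reference helper from the test utilities that is not shown here. A minimal sketch of what such a helper computes, assuming dense NumPy masks (the real helper may differ, e.g. in how it handles sparse masks):

import numpy as np

def naive_mask_apply(masks, data):
    # one mask-weighted frame sum per mask
    # data: (ny, nx, sy, sx); masks: sequence of dense (sy, sx) arrays
    res = np.zeros((len(masks),) + data.shape[:-2],
                   dtype=np.result_type(data, *masks))
    for i, mask in enumerate(masks):
        res[i] = (data * mask).sum(axis=(-2, -1))
    return res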
Example #2
def test_multi_mask_autodtype_complex_wide(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16))
    masks = _mk_random(size=(2, 16, 16), dtype="complex128")
    expected = _naive_mask_apply(masks, data)

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=lambda: masks)
    results = lt_ctx.run(analysis)

    assert results.mask_0_complex.raw_data.dtype.kind == 'c'
    assert results.mask_0_complex.raw_data.dtype == np.complex128

    assert np.allclose(
        results.mask_0_complex.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1_complex.raw_data,
        expected[1],
    )
Example #3
def test_simple_multi_udf_run():
    data = _mk_random(size=(32, 1860, 2048))
    dataset = MemoryDataSet(
        data=data,
        num_partitions=1,
        sig_dims=2,
        base_shape=(1, 930, 16),
        force_need_decode=True,
    )

    executor = InlineJobExecutor()
    udfs = [
        SumSigUDF(),
        SumUDF(),
    ]
    res = UDFRunner(udfs=udfs).run_for_dataset(
        dataset=dataset,
        executor=executor,
    )
    sumsigres, sumres = res.buffers
    print(sumsigres, sumres)
    assert np.allclose(sumres['intensity'], np.sum(data, axis=0))
    assert np.allclose(sumsigres['intensity'], np.sum(data, axis=(1, 2)))
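For a single UDF, the higher-level Context API is the usual entry point instead of driving UDFRunner directly; a short sketch, assuming a Context lt_ctx and the same dataset and SumUDF as above:

res = lt_ctx.run_udf(dataset=dataset, udf=SumUDF())
assert np.allclose(res['intensity'].data, np.sum(data, axis=0))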
Example #4
def test_all_sparse_analysis(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    mask0 = sp.csr_matrix(_mk_random(size=(16, 16)))
    mask1 = sparse.COO.from_numpy(_mk_random(size=(16, 16)))
    expected = _naive_mask_apply([mask0, mask1], data)

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset,
        factories=[lambda: mask0, lambda: mask1],
    )
    results = lt_ctx.run(analysis)

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
Example #5
def test_override_mask_dtype(lt_ctx, TYPE):
    mask_dtype = np.float32
    data = _mk_random(size=(16, 16, 16, 16), dtype=mask_dtype)
    masks = _mk_random(size=(2, 16, 16), dtype=np.float64)
    expected = _naive_mask_apply(masks.astype(mask_dtype), data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset, factories=lambda: masks, mask_dtype=mask_dtype, mask_count=len(masks),
    )
    analysis.TYPE = TYPE
    results = lt_ctx.run(analysis)

    assert results.mask_0.raw_data.dtype == mask_dtype

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
Example #6
def test_com_curl_2(lt_ctx):
    data = np.zeros((3, 3, 3, 3), dtype=np.float32)
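    # one bright pixel per frame at sig position (2 - x, y): a purely rotational
    # CoM vector field with zero divergence and constant curl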
    for y in range(3):
        for x in range(3):
            data[y, x, 2 - x, y] = 1
    dataset = MemoryDataSet(
        data=data,
        sig_dims=2,
    )
    analysis = lt_ctx.create_com_analysis(dataset=dataset, cy=1, cx=1)
    res = lt_ctx.run(analysis)

    print(data)
    print("y", res["y"].raw_data)
    print("x", res["x"].raw_data)
    print("divergence", res["divergence"].raw_data)
    print("curl", res["curl"].raw_data)

    assert np.all(res["x"].raw_data == [[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
    assert np.all(res["y"].raw_data == [[1, 0, -1], [1, 0, -1], [1, 0, -1]])

    assert np.all(res["divergence"].raw_data == 0)
    assert np.all(res["curl"].raw_data == -2)
Example #7
def test_get_multiple_frames_squeeze():
    data = _mk_random(size=(16, 16, 16, 16))
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    job = PickFrameJob(dataset=dataset,
                       squeeze=True,
                       slice_=Slice(origin=(5, 0, 0),
                                    shape=Shape((5, 16, 16), sig_dims=2)))

    executor = InlineJobExecutor()

    result = np.zeros(job.get_result_shape())
    for tiles in executor.run_job(job):
        for tile in tiles:
            tile.reduce_into_result(result)

    assert result.shape == (5, 16, 16)
    assert not np.allclose(result[0], result[1])
    assert np.allclose(result[0], data[0, 5])
    assert np.allclose(result[0:2], data[0, 5:7])
Example #8
def test_aux_tiled(lt_ctx, tileshape):
    data = _mk_random(size=(1, 5, 16, 16), dtype="float32")
    aux_data = EchoTiledUDF.aux_data(data=_mk_random(size=(1, 5, 2),
                                                     dtype="float32"),
                                     kind="nav",
                                     dtype="float32",
                                     extra_shape=(2, ))
    aux_data_2 = EchoTiledUDF.aux_data(data=_mk_random(size=(1, 5),
                                                       dtype="float32"),
                                       kind="nav",
                                       dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=tileshape,
                            num_partitions=2,
                            sig_dims=2)

    echo_udf = EchoTiledUDF(aux=aux_data, aux2=aux_data_2)
    res = lt_ctx.run_udf(dataset=dataset, udf=echo_udf)
    assert 'weighted' in res
    print(data.shape, res['weighted'].data.shape)
    assert np.allclose(
        res['weighted'].raw_data,
        np.sum(data, axis=(2, 3)).reshape(-1) * aux_data.raw_data[..., 0])
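EchoTiledUDF comes from the test helpers and is not shown here; a hedged sketch of the pattern it presumably follows for the 'weighted' buffer (aux buffers arrive through self.params, pre-sliced to the unit currently being processed; names and details assumed):

class EchoTiledUDFSketch(UDF):  # hypothetical stand-in for the real helper
    def get_result_buffers(self):
        return {'weighted': self.buffer(kind="nav", dtype="float32")}

    def process_tile(self, tile):
        # aux views match the tile's nav coordinates
        w = self.params.aux[..., 0]
        self.results.weighted[:] += tile.sum(axis=(-2, -1)) * w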
Example #9
def test_no_default_merge(lt_ctx):
    """
    Test that running fails when the merge function is missing
    for a buffer that is not :code:`kind='nav'`.
    """
    data = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    class NodefaultUDF(UDF):
        def get_result_buffers(self):
            return {
                'pixelsum_nav': self.buffer(kind="nav", dtype="float32"),
                'pixelsum': self.buffer(kind="sig", dtype="float32")
            }

        def process_frame(self, frame):
            self.results.pixelsum[:] += frame

    with pytest.raises(NotImplementedError):
        nd = NodefaultUDF()
        lt_ctx.run_udf(dataset=dataset, udf=nd)
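For contrast, a minimal UDF with a :code:`kind='sig'` buffer and a working merge could look like the following sketch (hypothetical, under the same API; accumulation uses += because every partition contributes to the full sig buffer):

class SumSigPixelsUDF(UDF):  # hypothetical example, not part of the test suite
    def get_result_buffers(self):
        return {'pixelsum': self.buffer(kind="sig", dtype="float32")}

    def process_frame(self, frame):
        self.results.pixelsum[:] += frame

    def merge(self, dest, src):
        # accumulate the partition result into the global buffer in place
        dest['pixelsum'][:] += src['pixelsum']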
Example #10
def test_com_complex_numbers_handcrafted_3(lt_ctx):
    data = np.ones((3, 3, 4, 4), dtype="complex64")
    data[0, 0] = np.array([
        0,    0,    0, 0,
        0,    0, 1-2j, 0,
        0,    0,    0, 0,
        0,    0,    0, 0,
    ], dtype="complex64").reshape((4, 4))
    ds_complex = MemoryDataSet(
        data=data,
        tileshape=(1, 4, 4),
        num_partitions=9,
    )
    analysis = lt_ctx.create_com_analysis(dataset=ds_complex, cx=0, cy=0, mask_radius=None)
    results = lt_ctx.run(analysis)

    print(data[0, 0])

    field_x = results.x_real.raw_data + 1j * results.x_imag.raw_data
    field_y = results.y_real.raw_data + 1j * results.y_imag.raw_data

    assert field_x[0, 0] == 2
    assert field_y[0, 0] == 1
Example #11
def test_multi_mask_autodtype_wide(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="int64")
    masks = _mk_random(size=(2, 16, 16))
    expected = _naive_mask_apply(masks, data)

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=lambda: masks)
    results = lt_ctx.run(analysis)

    assert results.mask_0.raw_data.dtype == np.result_type(
        np.float64, data.dtype, masks.dtype)

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
Example #12
def test_pick_fft_masked(lt_ctx):
    data = _mk_random([3 * 3, 8, 8], dtype=np.float32)
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 8, 8),
                            num_partitions=2,
                            sig_dims=2)
    analysis = PickFFTFrameAnalysis(dataset=dataset,
                                    parameters={
                                        'x': 1,
                                        'real_rad': 1,
                                        'real_centerx': 1,
                                        'real_centery': 1,
                                    })
    real_mask = np.invert(
        _make_circular_mask(centerX=1,
                            centerY=1,
                            imageSizeX=8,
                            imageSizeY=8,
                            radius=1))
    fft_data = np.fft.fftshift(abs(np.fft.fft2(data[1] * real_mask)))
    res = lt_ctx.run(analysis)

    assert np.allclose(res.intensity.raw_data, fft_data)
Example #13
def test_pick_analysis_via_api_3_3d_ds_fail_1(lt_ctx):
    data = _mk_random(size=(16 * 16, 16, 16))
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    analysis = PickFrameAnalysis(dataset=dataset, parameters={})
    with pytest.raises(ValueError):
        lt_ctx.run(analysis)

    analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8})
    with pytest.raises(ValueError):
        lt_ctx.run(analysis)

    analysis = PickFrameAnalysis(dataset=dataset,
                                 parameters={
                                     "x": 7,
                                     "y": 8,
                                     "z": 11
                                 })
    with pytest.raises(ValueError):
        lt_ctx.run(analysis)
Example #14
def test_point_3d_ds(lt_ctx):
    data = _mk_random(size=(16 * 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=2,
        sig_dims=2,
    )
    analysis = lt_ctx.create_point_analysis(dataset=dataset, x=8, y=8)
    results = lt_ctx.run(analysis)
    mask = np.zeros((16, 16))
    mask[8, 8] = 1
    expected = _naive_mask_apply([mask], dataset.data.reshape(
        (16, 16, 16, 16)))
    assert results.intensity.raw_data.shape == (16 * 16, )
    assert np.allclose(
        results.intensity.raw_data.reshape((16, 16)),
        expected,
    )
    assert np.allclose(
        results.intensity_log.raw_data.reshape((16, 16)),
        expected,
    )
Example #15
def test_sum_with_roi(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype='<u2')
    dataset = MemoryDataSet(data=data,
                            tileshape=(2, 16, 16),
                            num_partitions=32)

    roi = {
        "shape": "disk",
        "cx": 5,
        "cy": 6,
        "r": 7,
    }
    analysis = SumAnalysis(dataset=dataset, parameters={
        "roi": roi,
    })

    results = lt_ctx.run(analysis)

    mask = masks.circular(roi["cx"], roi["cy"], 16, 16, roi["r"])
    assert mask.shape == (16, 16)
    assert mask[0, 0] == 0
    assert mask[6, 5] == 1
    assert mask.dtype == bool

    # applying the mask flattens the first two dimensions, so we
    # only sum over axis 0 here:
    expected = data[mask, ...].sum(axis=(0, ))

    assert expected.shape == (16, 16)
    assert results['intensity'].raw_data.shape == (16, 16)

    # is not equal to results without mask:
    assert not np.allclose(results['intensity'].raw_data,
                           data.sum(axis=(0, 1)))
    # ... but rather like `expected`:
    assert np.allclose(results['intensity'].raw_data, expected)
    assert np.allclose(results['intensity_lin'].raw_data, expected)
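The same disk ROI can also be passed to the UDF interface as a boolean navigation-shaped mask; a short sketch, assuming SumUDF as in Example #3:

roi_mask = masks.circular(roi["cx"], roi["cy"], 16, 16, roi["r"])
udf_res = lt_ctx.run_udf(dataset=dataset, udf=SumUDF(), roi=roi_mask)
assert np.allclose(udf_res['intensity'].data, expected)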
Example #16
def test_numerics_fail(lt_ctx):
    dtype = 'float32'
    # Highest expected detector resolution
    RESOLUTION = 4096
    # Highest expected detector dynamic range
    RANGE = 1e6
    # default value for all cells
    VAL = 1.1

    data = np.full((2, 1, RESOLUTION, RESOLUTION), VAL, dtype=np.float32)
    data[0, 0, 0, 0] += VAL * RANGE
    dataset = MemoryDataSet(
        data=data,
        tileshape=(2, RESOLUTION, RESOLUTION),
        num_partitions=1,
        sig_dims=2,
    )
    mask0 = np.ones((RESOLUTION, RESOLUTION), dtype=np.float64)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset, factories=[lambda: mask0], mask_count=1, mask_dtype=dtype
    )

    results = lt_ctx.run(analysis)
    expected = np.array([[
        [VAL*RESOLUTION**2 + VAL*RANGE],
        [VAL*RESOLUTION**2]
    ]])
    naive = _naive_mask_apply([mask0], data)
    naive_32 = _naive_mask_apply([mask0.astype(dtype)], data)
    # The masks are float64, that means the calculation is performed with high resolution
    # and the naive result should be correct
    assert np.allclose(expected, naive)
    # We make sure LiberTEM calculated this with the lower-precision dtype we set
    assert np.allclose(results.mask_0.raw_data, expected[0]) == np.allclose(naive_32, expected)
    # Confirm that the numerical precision is actually insufficient.
    # If this succeeds, we have to rethink the premise of this test.
    assert not np.allclose(results.mask_0.raw_data, expected[0])
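The precision argument can be checked directly: float32 has a 24-bit significand, so near the expected sum of about 1.1 * 4096**2 ≈ 1.8e7 the gap between adjacent representable values is already 2.0, larger than a single per-pixel contribution of 1.1:

import numpy as np

# distance from the expected mask sum to the next representable float32
print(np.spacing(np.float32(1.1 * 4096**2)))  # 2.0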
Example #17
def test_auto_weird(lt_ctx):
    data = _mk_random((16, 8, 32, 64))

    dataset = MemoryDataSet(data=data,
                            tileshape=(8, 32, 64),
                            num_partitions=2,
                            sig_dims=2)

    def f(frame):
        return [
            "Shape %s" % str(frame.shape),
            dict(shape=frame.shape, sum=frame.sum()), lambda x: x,
            MemoryDataSet
        ]

    auto_result = lt_ctx.map(dataset=dataset, f=f)
    item = auto_result.data[0, 0]

    assert len(item) == 4
    assert isinstance(item[0], str)
    assert isinstance(item[1], dict)
    assert callable(item[2])
    assert item[2](1) == 1
    assert isinstance(item[3], type)
Example #18
def test_bad_merge(lt_ctx):
    """
    Test a bad example of a merge function that fails to update the buffer in place.
    """
    data = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    class BadmergeUDF(UDF):
        def get_result_buffers(self):
            return {'pixelsum': self.buffer(kind="nav", dtype="float32")}

        def process_frame(self, frame):
            self.results.pixelsum[:] = np.sum(frame)

        def merge(self, dest, src):
            # bad, because it just sets a key in dest, it doesn't copy over the data to dest
            dest['pixelsum'] = src['pixelsum']
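            # a correct nav merge would assign into the existing buffer view instead:
            # dest['pixelsum'][:] = src['pixelsum']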

    with pytest.raises(TypeError):
        bm = BadmergeUDF()
        lt_ctx.run_udf(dataset=dataset, udf=bm)
Example #19
def test_multi_mask_stack_force_sparse_pydata(lt_ctx, TYPE, backend):
    if backend == 'cupy':
        d = detect()
        cudas = d['cudas']
        if not cudas or not d['has_cupy']:
            pytest.skip("No CUDA device or no CuPy, skipping CuPy test")
    try:
        if backend == 'cupy':
            set_use_cuda(cudas[0])
        data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
        masks = _mk_random(size=(2, 16, 16))
        expected = _naive_mask_apply(masks, data)

        dataset = MemoryDataSet(data=data,
                                tileshape=(4 * 4, 4, 4),
                                num_partitions=2)
        analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                               factories=lambda: masks,
                                               use_sparse='sparse.pydata',
                                               mask_count=2)
        analysis.TYPE = TYPE
        if backend == 'cupy' and TYPE == 'UDF':
            with pytest.raises(ValueError):
                results = lt_ctx.run(analysis)
        else:
            results = lt_ctx.run(analysis)
            assert np.allclose(
                results.mask_0.raw_data,
                expected[0],
            )
            assert np.allclose(
                results.mask_1.raw_data,
                expected[1],
            )
    finally:
        set_use_cpu(0)
Example #20
def auto_ds(doctest_namespace):
    dataset = MemoryDataSet(datashape=[16, 16, 16, 16])
    doctest_namespace["dataset"] = dataset
Example #21
def test_holo_reconstruction(lt_ctx, backend):
    if backend == 'cupy':
        d = detect()
        cudas = d['cudas']
        if not cudas or not d['has_cupy']:
            pytest.skip("No CUDA device or no CuPy, skipping CuPy test")
    # Prepare image parameters and mesh
    nx, ny = (5, 7)
    sx, sy = (64, 64)
    slice_crop = (slice(None), slice(None), slice(sx // 4, sx // 4 * 3),
                  slice(sy // 4, sy // 4 * 3))

    lnx = np.arange(nx)
    lny = np.arange(ny)
    lsx = np.arange(sx)
    lsy = np.arange(sy)

    mnx, mny, msx, msy = np.meshgrid(lnx, lny, lsx, lsy)

    # Prepare phase image
    phase_ref = np.pi * msx * (mnx.max() - mnx) * mny / sx**2 \
        + np.pi * msy * mnx * (mny.max() - mny) / sy**2

    # Generate holograms
    holo = np.zeros_like(phase_ref)
    ref = np.zeros_like(phase_ref)

    for i in range(nx):
        for j in range(ny):
            holo[j, i, :, :] = hologram_frame(np.ones((sx, sy)),
                                              phase_ref[j, i, :, :])
            ref[j, i, :, :] = hologram_frame(np.ones((sx, sy)),
                                             np.zeros((sx, sy)))

    # Prepare LT datasets and do reconstruction
    dataset_holo = MemoryDataSet(data=holo,
                                 tileshape=(ny, sx, sy),
                                 num_partitions=2,
                                 sig_dims=2)

    dataset_ref = MemoryDataSet(data=ref,
                                tileshape=(ny, sx, sy),
                                num_partitions=1,
                                sig_dims=2)

    sb_position = [11, 6]
    sb_size = 6.26498204

    holo_job = HoloReconstructUDF(out_shape=(sx, sy),
                                  sb_position=sb_position,
                                  sb_size=sb_size)
    try:
        if backend == 'cupy':
            set_use_cuda(cudas[0])
        w_holo = lt_ctx.run_udf(dataset=dataset_holo,
                                udf=holo_job)['wave'].data
        w_ref = lt_ctx.run_udf(dataset=dataset_ref, udf=holo_job)['wave'].data
    finally:
        set_use_cpu(0)

    w = w_holo / w_ref

    phase = np.angle(w)

    assert np.allclose(phase_ref[slice_crop], phase[slice_crop], rtol=0.12)
Example #22
def test_com_parameter_guess(lt_ctx, roi):
    data = np.zeros((5, 5, 5, 5), dtype=np.float32)
    # data with negative divergence and no curl
    for i in range(3):
        for j in range(3):
            data[i+1, j+1, 3-i, 3-j] = 1
    data[0, :, 2, 2] = 1
    data[4, :, 2, 2] = 1
    data[:, 0, 2, 2] = 1
    data[:, 4, 2, 2] = 1

    data_fliprot = data.transpose(0, 1, 3, 2)

    dataset = MemoryDataSet(
        data=data,
        sig_dims=2,
    )

    analysis = lt_ctx.create_com_analysis(
        dataset=dataset,
        cy=2,
        cx=2,
        scan_rotation=0.,
        flip_y=False
    )
    res = lt_ctx.run(analysis)

    print(res.divergence.raw_data)
    print(res.curl.raw_data)

    guess = guess_corrections(res.y.raw_data, res.x.raw_data, roi=roi)
    print(guess)

    g_rot, g_flip_y, g_cy, g_cx = guess
    # Check namedtuple
    assert guess.scan_rotation == g_rot
    assert guess.flip_y == g_flip_y
    assert guess.cy == g_cy
    assert guess.cx == g_cx

    assert g_rot == 0
    assert g_flip_y is False
    assert g_cy == 0
    assert g_cx == 0

    dataset_changed = MemoryDataSet(
        data=data_fliprot,
        sig_dims=2,
    )

    analysis_changed = lt_ctx.create_com_analysis(
        dataset=dataset_changed,
        cy=3,
        cx=1,
        scan_rotation=0.,
        flip_y=False,
    )
    res_changed = lt_ctx.run(analysis_changed)

    guess = guess_corrections(res_changed.y.raw_data, res_changed.x.raw_data, roi=roi)
    print(guess)

    g_rot, g_flip_y, g_cy, g_cx = guess

    # Transposing is equivalent to flipping and rotating 90°
    assert g_rot == 90
    assert g_flip_y is True
    assert g_cy == -1
    assert g_cx == 1

    # We apply the corrections, deliberately overcorrecting cx by +1
    # so that a residual shift remains to be detected below
    analysis_corrected = lt_ctx.create_com_analysis(
        dataset=dataset_changed,
        cy=3+g_cy,
        cx=1+g_cx+1,
        scan_rotation=0.+g_rot,
        flip_y=(g_flip_y is not False),
    )
    res_corrected = lt_ctx.run(analysis_corrected)

    corrected_guess = guess_corrections(res_corrected.y.raw_data, res_corrected.x.raw_data, roi=roi)
    print(corrected_guess)
    print(res_corrected.divergence.raw_data)
    print(res_corrected.curl.raw_data)

    g_rot, g_flip_y, g_cy, g_cx = corrected_guess

    # Backtransform of the shift
    g_cy, g_cx = apply_correction(g_cy, g_cx, 90, True, forward=False)

    print(g_cy, g_cx)

    assert g_rot == 0
    assert g_flip_y is False
    assert np.allclose(g_cy, 0)
    assert np.allclose(g_cx, -1)
Example #23
def test_ssb_container(dpix, lt_ctx, backend):
    try:
        if backend == 'cupy':
            set_use_cuda(0)
        dtype = np.float64

        scaling = 4
        shape = (29, 30, 189 // scaling, 197 // scaling)

        # The acceleration voltage U in keV
        U = 300
        lamb = wavelength(U)

        # STEM semiconvergence angle in radians
        semiconv = 25e-3
        # Diameter of the primary beam in the diffraction pattern in pixels
        semiconv_pix = 78.6649 / scaling

        cy = 93 // scaling
        cx = 97 // scaling

        input_data = (np.random.uniform(0, 1, np.prod(shape)) *
                      np.linspace(1.0, 1000.0, num=np.prod(shape)))
        input_data = input_data.astype(np.float64).reshape(shape)

        masks = generate_masks(reconstruct_shape=shape[:2],
                               mask_shape=shape[2:],
                               dtype=dtype,
                               lamb=lamb,
                               dpix=dpix,
                               semiconv=semiconv,
                               semiconv_pix=semiconv_pix,
                               cy=cy,
                               cx=cx,
                               method='subpix')

        mask_container = MaskContainer(
            mask_factories=lambda: masks,
            dtype=masks.dtype,
            use_sparse='scipy.sparse.csc',
            count=masks.shape[0],
        )

        udf = SSB_UDF(lamb=lamb,
                      dpix=dpix,
                      semiconv=semiconv,
                      semiconv_pix=semiconv_pix,
                      dtype=dtype,
                      cy=cy,
                      cx=cx,
                      mask_container=mask_container)

        dataset = MemoryDataSet(
            data=input_data,
            tileshape=(20, shape[2], shape[3]),
            num_partitions=2,
            sig_dims=2,
        )

        result = lt_ctx.run_udf(udf=udf, dataset=dataset)

        result_f, reference_masks = reference_ssb(input_data,
                                                  U=U,
                                                  dpix=dpix,
                                                  semiconv=semiconv,
                                                  semiconv_pix=semiconv_pix,
                                                  cy=cy,
                                                  cx=cx)

        task_data = udf.get_task_data()

        udf_masks = task_data['masks'].computed_masks

        half_y = shape[0] // 2 + 1
        # Use symmetry and reshape like generate_masks()
        reference_masks = reference_masks[:half_y].reshape(
            (half_y * shape[1], shape[2], shape[3]))

        print(np.max(np.abs(udf_masks.todense() - reference_masks)))

        print(np.max(np.abs(result['pixels'].data - result_f)))

        assert np.allclose(result['pixels'].data, result_f)
    finally:
        if backend == 'cupy':
            set_use_cpu(0)
Example #24
def test_symmetries(lt_ctx, TYPE):
    (d1, i1, p1) = cbed_frame(all_equal=True,
                              radius=3,
                              indices=np.array([(1, 0)]))
    (d2, i2, p2) = cbed_frame(all_equal=True,
                              radius=3,
                              indices=np.array([(-1, 0)]))
    (d3, i3, p3) = cbed_frame(all_equal=True,
                              radius=3,
                              indices=np.array([(1, 0), (-1, 0)]))
    (d4, i4, p4) = cbed_frame(all_equal=True,
                              radius=3,
                              indices=np.array([(1, 0), (-1, 0), (0, 1),
                                                (0, -1)]))

    data = np.zeros((2, 2, *d1[0].shape))
    data[0, 0] = d1[0]
    data[0, 1] = d2[0]
    data[1, 0] = d3[0]
    data[1, 1] = d4[0]

    ds = MemoryDataSet(data=data)

    r = np.linalg.norm(p2[0] - p1[0]) / 2
    cy, cx = (p2[0] + p1[0]) / 2

    analysis = lt_ctx.create_radial_fourier_analysis(dataset=ds,
                                                     cy=cy,
                                                     cx=cx,
                                                     ri=0,
                                                     ro=r + 4,
                                                     n_bins=2,
                                                     max_order=8)
    analysis.TYPE = TYPE

    results = lt_ctx.run(analysis)

    c_0_0 = results.complex_0_0.raw_data
    c_1_0 = results.complex_1_0.raw_data

    assert np.allclose(np.abs(c_0_0), 0)
    assert np.allclose(np.abs(c_1_0), data.sum(axis=(2, 3)))

    c_0_1 = results.complex_0_1.raw_data
    c_1_1 = results.complex_1_1.raw_data

    assert np.allclose(np.abs(c_0_1), 0)

    assert np.allclose(np.abs(c_1_1[1, 0]), 0)
    assert np.allclose(np.abs(c_1_1[1, 1]), 0)

    assert np.all(np.abs(c_1_1[0, 0]) > 0)
    assert np.all(np.abs(c_1_1[0, 1]) > 0)
    assert np.allclose(np.angle(c_1_1[0, 0]), np.pi / 2)
    assert np.allclose(np.angle(c_1_1[0, 1]), -np.pi / 2)

    c_0_2 = results.complex_0_2.raw_data
    c_1_2 = results.complex_1_2.raw_data

    assert np.allclose(np.abs(c_0_2), 0)

    # 2-fold suppressed for 4-fold symmetry
    assert np.allclose(np.abs(c_1_2[1, 1]), 0)

    assert np.all(np.abs(c_1_2[0, 0]) > 0)
    assert np.all(np.abs(c_1_2[0, 1]) > 0)
    assert np.all(np.abs(c_1_2[1, 0]) > 0)
    # Discontinuity at this point, can be pi or -pi
    assert np.allclose(np.abs(np.angle(c_1_2[0, 0])), np.pi)
    assert np.allclose(np.abs(np.angle(c_1_2[0, 1])), np.pi)
    assert np.allclose(np.abs(np.angle(c_1_2[1, 0])), np.pi)

    c_0_3 = results.complex_0_3.raw_data
    c_1_3 = results.complex_1_3.raw_data

    assert np.allclose(np.abs(c_0_3), 0)
    # odd harmonics suppressed in 2-fold symmetry
    assert np.allclose(np.abs(c_1_3[1]), 0)

    assert np.all(np.abs(c_1_3[0, 0]) > 0)
    assert np.all(np.abs(c_1_3[0, 1]) > 0)
    assert np.allclose(np.angle(c_1_3[0, 0]), -np.pi / 2)
    assert np.allclose(np.angle(c_1_3[0, 1]), np.pi / 2)

    c_0_4 = results.complex_0_4.raw_data
    c_1_4 = results.complex_1_4.raw_data

    assert np.allclose(np.abs(c_0_4), 0)

    assert np.all(np.abs(c_1_4) > 0)

    assert np.allclose(np.angle(c_1_4[0, 0]), 0)
    assert np.allclose(np.angle(c_1_4[0, 1]), 0)
    assert np.allclose(np.angle(c_1_4[1, 0]), 0)
    assert np.allclose(np.angle(c_1_4[1, 1]), 0)

    c_0_5 = results.complex_0_5.raw_data
    c_1_5 = results.complex_1_5.raw_data

    assert np.allclose(np.abs(c_0_5), 0)

    # odd harmonics suppressed in 2-fold symmetry
    assert np.allclose(np.abs(c_1_5[1]), 0)

    c_0_7 = results.complex_0_7.raw_data
    c_1_7 = results.complex_1_7.raw_data

    assert np.allclose(np.abs(c_0_7), 0)

    # odd harmonics suppressed in 2-fold symmetry
    assert np.allclose(np.abs(c_1_7[1]), 0)

    c_0_8 = results.complex_0_8.raw_data
    c_1_8 = results.complex_1_8.raw_data

    assert np.allclose(np.abs(c_0_8), 0)

    assert np.all(np.abs(c_1_8) > 0)

    assert np.allclose(np.angle(c_1_8[0, 0]), 0)
    assert np.allclose(np.angle(c_1_8[0, 1]), 0)
    assert np.allclose(np.angle(c_1_8[1, 0]), 0)
    assert np.allclose(np.angle(c_1_8[1, 1]), 0)
Example #25
def test_run_refine_fastmatch(lt_ctx, progress):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    # randomly keep ~90% of the peak indices
    keep = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[keep]

    radius = 10

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                            num_partitions=1, sig_dims=2)
    matcher = grm.Matcher()

    template = m.radial_gradient(
        centerX=radius+1,
        centerY=radius+1,
        imageSizeX=2*radius+2,
        imageSizeY=2*radius+2,
        radius=radius
    )

    match_patterns = [
        common.patterns.RadialGradient(radius=radius),
        common.patterns.Circular(radius=radius),
        common.patterns.BackgroundSubtraction(radius=radius),
        common.patterns.RadialGradientBackgroundSubtraction(radius=radius),
        common.patterns.UserTemplate(template=template)
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        (res, real_indices) = udf.refinement.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern,
            progress=progress
        )
        print(peaks - grm.calc_coords(
            res['zero'].data[0],
            res['a'].data[0],
            res['b'].data[0],
            indices)
        )

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['a'].data[0], a, atol=0.2)
        assert np.allclose(res['b'].data[0], b, atol=0.2)
Example #26
def test_run_refine_fastmatch_zeroshift(lt_ctx):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    # randomly keep ~90% of the peak indices
    keep = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[keep]

    radius = 10
    # Exactly between peaks, worst case
    shift = (a + b) / 2

    data_0, indices_0, peaks_0 = cbed_frame(*shape, zero, a, b, indices, radius)
    data_1, indices_1, peaks_1 = cbed_frame(*shape, zero + shift, a, b, indices, radius)

    data = np.concatenate((data_0, data_1), axis=0)

    dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                            num_partitions=1, sig_dims=2)
    matcher = grm.Matcher()

    match_patterns = [
        # Least reliable pattern
        common.patterns.Circular(radius=radius),
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        zero_shift = np.array([(0., 0.), shift]).astype(np.float32)
        (res, real_indices) = udf.refinement.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern,
            zero_shift=UDF.aux_data(zero_shift, kind='nav', extra_shape=(2,))
        )
        print(peaks_0 - grm.calc_coords(
            res['zero'].data[0],
            res['a'].data[0],
            res['b'].data[0],
            indices_0)
        )

        print(peaks_1 - grm.calc_coords(
            res['zero'].data[1],
            res['a'].data[1],
            res['b'].data[1],
            indices_1)
        )

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['zero'].data[1], zero + shift, atol=0.5)
        assert np.allclose(res['a'].data, a, atol=0.2)
        assert np.allclose(res['b'].data, b, atol=0.2)
Example #27
def test_correlation_methods(lt_ctx, cls, dtype, kwargs):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    radius = 8

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)

    template = m.radial_gradient(centerX=radius + 1,
                                 centerY=radius + 1,
                                 imageSizeX=2 * radius + 2,
                                 imageSizeY=2 * radius + 2,
                                 radius=radius)

    match_patterns = [
        common.patterns.RadialGradient(radius=radius),
        common.patterns.Circular(radius=radius),
        common.patterns.BackgroundSubtraction(radius=radius),
        common.patterns.RadialGradientBackgroundSubtraction(radius=radius),
        common.patterns.UserTemplate(template=template)
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        if cls is udf.correlation.SparseCorrelationUDF and kwargs.get(
                'zero_shift'):
            with pytest.raises(ValueError):
                m_udf = cls(match_pattern=match_pattern,
                            peaks=peaks.astype(dtype),
                            **kwargs)
        else:
            m_udf = cls(match_pattern=match_pattern,
                        peaks=peaks.astype(dtype),
                        **kwargs)
            res = lt_ctx.run_udf(dataset=dataset, udf=m_udf)
            print(peaks)
            print(res['refineds'].data[0])
            print(peaks - res['refineds'].data[0])
            print(res['peak_values'].data[0])
            print(res['peak_elevations'].data[0])

            # import matplotlib.pyplot as plt
            # fig, ax = plt.subplots()
            # plt.imshow(data[0])
            # for p in np.flip(res['refineds'].data[0], axis=-1):
            #     ax.add_artist(plt.Circle(p, radius, fill=False, color='y'))
            # plt.show()

            assert np.allclose(res['refineds'].data[0], peaks, atol=0.5)