def test_com_divergence(lt_ctx):
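    # Each frame (i, j) has a single bright pixel at signal position (i, j),
    # so the center of mass shifts by one pixel per scan step: d(x)/dx =
    # d(y)/dy = 1, giving divergence 2 and curl 0 everywhere.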
    data = np.zeros((3, 3, 3, 3), dtype=np.float32)
    for i in range(3):
        for j in range(3):
            data[i, j, i, j] = 1
    dataset = MemoryDataSet(
        data=data,
        sig_dims=2,
    )
    analysis = lt_ctx.create_com_analysis(
        dataset=dataset,
        cy=1,
        cx=1,
    )
    res = lt_ctx.run(analysis)

    print(data)
    print("y", res["y"].raw_data)
    print("x", res["x"].raw_data)
    print("divergence", res["divergence"].raw_data)
    print("curl", res["curl"].raw_data)
    assert np.all(res["x"].raw_data == [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
    assert np.all(res["y"].raw_data == [[-1, -1, -1], [0, 0, 0], [1, 1, 1]])

    assert np.all(res["divergence"].raw_data == 2)
    assert np.all(res["curl"].raw_data == 0)

def test_numerics_succeed(lt_ctx):
    dtype = 'float64'
    # Highest expected detector resolution
    RESOLUTION = 4096
    # Highest expected detector dynamic range
    RANGE = 1e6
    # default value for all cells
    VAL = 1.1

    data = np.full((2, 2, RESOLUTION, RESOLUTION), VAL, dtype=np.float32)
    data[0, 0, 0, 0] += VAL * RANGE
    dataset = MemoryDataSet(
        data=data,
        tileshape=(2, RESOLUTION, RESOLUTION),
        num_partitions=2,
        sig_dims=2,
    )
    mask0 = np.ones((RESOLUTION, RESOLUTION), dtype=np.float32)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=[lambda: mask0],
                                           mask_count=1,
                                           mask_dtype=dtype)

    results = lt_ctx.run(analysis)
    expected = np.array(
        [[[VAL * RESOLUTION**2 + VAL * RANGE, VAL * RESOLUTION**2],
          [VAL * RESOLUTION**2, VAL * RESOLUTION**2]]])
    naive = _naive_mask_apply([mask0.astype(dtype)], data.astype(dtype))

    assert np.allclose(expected, naive)
    assert np.allclose(expected[0], results.mask_0.raw_data)
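
# Why the float64 mask dtype matters above (a minimal numerics sketch, not
# part of the analysis API): once a float32 accumulator reaches 2**24, adding
# 1.0 falls below the accumulator's ULP and is rounded away.
def _float32_accumulation_demo():
    acc = np.float32(2 ** 24)
    assert acc + np.float32(1.0) == acc  # the small addend is lost in float32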

def test_com_complex_numbers(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="complex64")
    ds_complex = MemoryDataSet(
        data=data,
        tileshape=(1, 16, 16),
        num_partitions=2,
    )
    analysis = lt_ctx.create_com_analysis(dataset=ds_complex,
                                          cx=0,
                                          cy=0,
                                          mask_radius=None)
    results = lt_ctx.run(analysis)

    reshaped_data = ds_complex.data.reshape((16 * 16, 16, 16))
    field_x = results.x_real.raw_data + 1j * results.x_imag.raw_data
    field_y = results.y_real.raw_data + 1j * results.y_imag.raw_data

    field_x = field_x.reshape((16 * 16))
    field_y = field_y.reshape((16 * 16))
    for idx in range(16 * 16):
        scy, scx = measurements.center_of_mass(reshaped_data[idx])

        print(scx, field_x[idx])

        # difference between scipy and our implementation: we don't divide by
        # zero for all-zero frames
        if np.isinf(scx):
            assert field_x[idx] == 0
            assert field_y[idx] == 0
        else:
            assert np.allclose(scx, field_x[idx])
            assert np.allclose(scy, field_y[idx])
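
# A zero-safe center of mass in the spirit of the comparison above (a sketch,
# assuming plain real- or complex-valued frames; LiberTEM's implementation
# differs in detail): scipy divides by the frame sum and yields inf/nan for an
# all-zero frame, while here we return 0 instead.
def _safe_center_of_mass(frame):
    total = frame.sum()
    if total == 0:
        return 0.0, 0.0  # instead of dividing by zero like scipy
    y, x = np.mgrid[0:frame.shape[0], 0:frame.shape[1]]
    return (frame * y).sum() / total, (frame * x).sum() / total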

def test_override_mask_dtype(lt_ctx):
    mask_dtype = np.float32
    data = _mk_random(size=(16, 16, 16, 16), dtype=mask_dtype)
    masks = _mk_random(size=(2, 16, 16), dtype=np.float64)
    expected = _naive_mask_apply(masks.astype(mask_dtype), data)

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=lambda: masks,
                                           mask_dtype=mask_dtype,
                                           mask_count=len(masks))
    results = lt_ctx.run(analysis)

    assert results.mask_0.raw_data.dtype == mask_dtype

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )

def test_multi_masks(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
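    # mask formats can be mixed: dense numpy, scipy.sparse CSR and
    # pydata/sparse COO masks should all give the same result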
    mask0 = _mk_random(size=(16, 16))
    mask1 = sp.csr_matrix(_mk_random(size=(16, 16)))
    mask2 = sparse.COO.from_numpy(_mk_random(size=(16, 16)))
    expected = _naive_mask_apply([mask0, mask1, mask2], data)

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset,
        factories=[lambda: mask0, lambda: mask1, lambda: mask2])
    results = lt_ctx.run(analysis)

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
    assert np.allclose(
        results.mask_2.raw_data,
        expected[2],
    )

def test_multi_mask_force_dtype(lt_ctx):
    force_dtype = np.dtype(np.int32)
    data = _mk_random(size=(16, 16, 16, 16), dtype="int16")
    masks = _mk_random(size=(2, 16, 16), dtype="bool")
    expected = _naive_mask_apply(masks.astype(force_dtype),
                                 data.astype(force_dtype))

    dataset = MemoryDataSet(data=data,
                            tileshape=(4 * 4, 4, 4),
                            num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=lambda: masks,
                                           dtype=force_dtype)
    results = lt_ctx.run(analysis)

    assert results.mask_0.raw_data.dtype.kind == force_dtype.kind
    assert results.mask_0.raw_data.dtype == force_dtype

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
async def test_fd_limit(async_executor):
    import resource

    # lower the soft fd limit so that leaking file descriptors raises errors,
    # but keep the hard limit so we can restore the old value afterwards:
    proc = psutil.Process()
    oldlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE,
                       (proc.num_fds() + 24, oldlimit[1]))

    print("fds", proc.num_fds())

    try:
        data = _mk_random(size=(1, 16, 16), dtype='<u2')
        dataset = MemoryDataSet(data=data,
                                tileshape=(1, 16, 16),
                                num_partitions=1)

        roi = np.ones((1, ), dtype=bool)
        udf = PickUDF()

        for i in range(32):
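            # any fd leak per iteration would exhaust the small soft limit
            # set above, making a later run fail to open files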
            print(i)
            print(proc.num_fds())

            async for part in UDFRunner([udf]).run_for_dataset_async(
                    dataset=dataset,
                    executor=async_executor,
                    cancel_id="42",
                    roi=roi,
            ):
                pass
    finally:
        resource.setrlimit(resource.RLIMIT_NOFILE, oldlimit)

def test_simple_example(lt_ctx):
    # creating a dataset where frames 0:3 are strong crystalline, frames 3:6
    # are weak crystalline and frames 6:9 are amorphous
    data = np.zeros([3*3, 5, 5]).astype(np.float32)
    # adding high intensity zero order peak for all frames
    data[:, 2, 2] = 7
    # adding strong non-zero order diffraction peaks for 0:3 frames
    data[0:3, 0, 0] = 2
    data[0:3, 4, 4] = 2
    # adding weak non-zero order diffraction peaks for 3:6 frames
    data[3:6, 2, 0] = 1
    data[3:6, 2, 4] = 1
    dataset = MemoryDataSet(data=data, tileshape=(1, 5, 5),
                            num_partitions=3, sig_dims=2)
    result, coordinates = feature.make_feature_vec(
        ctx=lt_ctx, dataset=dataset, delta=0, n_peaks=4, min_dist=0
    )
    print(result['feature_vec'].data)
    print(coordinates)
    # feature vector values should be all zero for the amorphous frames
    assert np.allclose(result['feature_vec'].data[6:9], 0)
    # feature vector values should be non-zero for the strong crystalline frames
    # (the strong peaks are listed first)
    assert np.all(result['feature_vec'].data[0:3, 0:2])
    # feature vector values should be non-zero for the weak crystalline frames
    # (at least one peak is recognized)
    assert np.all(result['feature_vec'].data[3:6, 2:4])
    # feature vectors should differ between strong and weak crystalline frames
    # (their non-zero order diffraction peaks are at different positions)
    assert (result['feature_vec'].data[3:6] != result['feature_vec'].data[0:3]).all()
def test_sum_zero_roi(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype='<u2')
    dataset = MemoryDataSet(data=data,
                            tileshape=(2, 16, 16),
                            num_partitions=32)

    roi = {
        "shape": "disk",
        "cx": -1,
        "cy": -1,
        "r": 0,
    }
    analysis = SumAnalysis(dataset=dataset, parameters={
        "roi": roi,
    })

    results = lt_ctx.run(analysis)

    mask = masks.circular(roi["cx"], roi["cy"], 16, 16, roi["r"])
    assert mask.shape == (16, 16)
    assert np.count_nonzero(mask) == 0
    assert mask.dtype == bool

    # applying the mask flattens the first two dimensions, so we
    # only sum over axis 0 here:
    expected = data[mask, ...].sum(axis=(0, ))

    assert expected.shape == (16, 16)
    assert results['intensity'].raw_data.shape == (16, 16)

    # is not equal to results without mask:
    assert not np.allclose(results['intensity'].raw_data,
                           data.sum(axis=(0, 1)))
    # ... but rather like `expected`:
    assert np.allclose(results['intensity'].raw_data, expected)
def test_bad_merge(lt_ctx):
    """
    Test bad example of updating buffer
    """
    data = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16),
                            num_partitions=2, sig_dims=2)

    class BadmergeUDF(UDF):
        def get_result_buffers(self):
            return {
                'pixelsum': self.buffer(
                    kind="nav", dtype="float32"
                )
            }

        def process_frame(self, frame):
            self.results.pixelsum[:] = np.sum(frame)

        def merge(self, dest, src):
            # bad: this rebinds the key in dest instead of copying the data
            # into the existing result buffer
            dest['pixelsum'] = src['pixelsum']

    with pytest.raises(TypeError):
        bm = BadmergeUDF()
        lt_ctx.run_udf(dataset=dataset, udf=bm)
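
    # For contrast, a minimal correct merge (a sketch following the dict-style
    # buffer access used above): write into the existing buffer instead of
    # rebinding the key.
    class GoodMergeUDF(BadmergeUDF):
        def merge(self, dest, src):
            dest['pixelsum'][:] = src['pixelsum']

    res = lt_ctx.run_udf(dataset=dataset, udf=GoodMergeUDF())
    assert np.allclose(res['pixelsum'].data, data.sum(axis=(1, 2)))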
def test_ssb():
    ctx = lt.Context(executor=InlineJobExecutor())
    dtype = np.float64

    scaling = 4
    shape = (29, 30, 189 // scaling, 197 // scaling)

    # The acceleration voltage U in keV
    U = 300
    # STEM pixel size in m, here 50 STEM pixels on 0.5654 nm
    dpix = 0.5654 / 50 * 1e-9
    # STEM semiconvergence angle in radians
    semiconv = 25e-3
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = 78.6649 / scaling

    cy = 93 // scaling
    cx = 97 // scaling

    input_data = np.random.uniform(0, 1, shape)
    LG = np.linspace(1.0,
                     1000.0,
                     num=shape[0] * shape[1] * shape[2] * shape[3])
    LG = LG.reshape(shape[0], shape[1], shape[2], shape[3])

    input_data = input_data * LG
    input_data = input_data.astype(np.float64)

    udf = SSB_UDF(U=U,
                  dpix=dpix,
                  semiconv=semiconv,
                  semiconv_pix=semiconv_pix,
                  dtype=dtype,
                  cy=cy,
                  cx=cx)

    dataset = MemoryDataSet(
        data=input_data,
        tileshape=(20, shape[2], shape[3]),
        num_partitions=2,
        sig_dims=2,
    )

    result = ctx.run_udf(udf=udf, dataset=dataset)

    result_f, _, _ = reference_ssb(input_data,
                                   U=U,
                                   dpix=dpix,
                                   semiconv=semiconv,
                                   semiconv_pix=semiconv_pix,
                                   cy=cy,
                                   cx=cx)

    assert np.allclose(np.abs(result['pixels']), np.abs(result_f))
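
# For reference, the relativistic electron wavelength needed by SSB can be
# derived directly from the acceleration voltage (a sketch, not necessarily
# identical to the `wavelength` helper used elsewhere; U_kV in kV, result in m):
def electron_wavelength(U_kV):
    import scipy.constants as const
    U_V = U_kV * 1e3
    # lambda = h / sqrt(2 m0 e U (1 + e U / (2 m0 c^2)))
    return const.h / np.sqrt(
        2 * const.m_e * const.e * U_V
        * (1 + const.e * U_V / (2 * const.m_e * const.c ** 2))
    )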
def test_featurevector(lt_ctx):
    shape = np.array([128, 128])
    zero = shape // 2
    a = np.array([24, 0.])
    b = np.array([0., 30])
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    radius = 5
    radius_outer = 10

    template = m.background_subtraction(centerX=radius_outer + 1,
                                        centerY=radius_outer + 1,
                                        imageSizeX=radius_outer * 2 + 2,
                                        imageSizeY=radius_outer * 2 + 2,
                                        radius=radius_outer,
                                        radius_inner=radius + 1,
                                        antialiased=False)

    data, indices, peaks = cbed_frame(*shape,
                                      zero,
                                      a,
                                      b,
                                      indices,
                                      radius,
                                      all_equal=True)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)

    match_pattern = blobfinder.UserTemplate(template=template)

    stack = functools.partial(
        blobfinder.feature_vector,
        imageSizeX=shape[1],
        imageSizeY=shape[0],
        peaks=peaks,
        match_pattern=match_pattern,
    )

    job = lt_ctx.create_mask_job(dataset=dataset,
                                 factories=stack,
                                 mask_count=len(peaks),
                                 mask_dtype=np.float32)
    res = lt_ctx.run(job)

    peak_data, _, _ = cbed_frame(*shape,
                                 zero,
                                 a,
                                 b,
                                 np.array([(0, 0)]),
                                 radius,
                                 all_equal=True)
    peak_sum = peak_data.sum()

    assert np.allclose(res.sum(), data.sum())
    assert np.allclose(res, peak_sum)
def test_run_refine_fastmatch_zeroshift(lt_ctx):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    drop = np.random.choice([True, False], size=len(indices), p=[0.9, 0.1])
    indices = indices[drop]

    radius = 10
    # Exactly between peaks, worst case
    shift = (a + b) / 2

    data_0, indices_0, peaks_0 = cbed_frame(*shape, zero, a, b, indices,
                                            radius)
    data_1, indices_1, peaks_1 = cbed_frame(*shape, zero + shift, a, b,
                                            indices, radius)

    data = np.concatenate((data_0, data_1), axis=0)

    dataset = MemoryDataSet(data=data,
                            tileshape=(1, *shape),
                            num_partitions=1,
                            sig_dims=2)
    matcher = grm.Matcher()

    match_patterns = [
        # Least reliable pattern
        common.patterns.Circular(radius=radius),
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        zero_shift = np.array([(0., 0.), shift]).astype(np.float32)
        (res, real_indices) = udf.refinement.run_refine(
            ctx=lt_ctx,
            dataset=dataset,
            zero=zero + np.random.uniform(-1, 1, size=2),
            a=a + np.random.uniform(-1, 1, size=2),
            b=b + np.random.uniform(-1, 1, size=2),
            matcher=matcher,
            match_pattern=match_pattern,
            zero_shift=UDF.aux_data(zero_shift, kind='nav', extra_shape=(2, )))
        print(peaks_0 - grm.calc_coords(res['zero'].data[0], res['a'].data[0],
                                        res['b'].data[0], indices_0))

        print(peaks_1 - grm.calc_coords(res['zero'].data[1], res['a'].data[1],
                                        res['b'].data[1], indices_1))

        assert np.allclose(res['zero'].data[0], zero, atol=0.5)
        assert np.allclose(res['zero'].data[1], zero + shift, atol=0.5)
        assert np.allclose(res['a'].data, a, atol=0.2)
        assert np.allclose(res['b'].data, b, atol=0.2)

def test_masks_timeseries_2d_frames(lt_ctx):
    data = _mk_random(size=(16 * 16, 16, 16), dtype="<u2")
    dataset = MemoryDataSet(data=data, tileshape=(2, 16, 16), num_partitions=2)
    mask0 = _mk_random(size=(16, 16))
    analysis = lt_ctx.create_mask_analysis(dataset=dataset,
                                           factories=[lambda: mask0])
    results = lt_ctx.run(analysis)
    assert results.mask_0.raw_data.shape == (256, )
def ds_random():
    data = _mk_random(size=(16, 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=2,
    )
    return dataset
def test_single_frame_tiles(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    mask = _mk_random(size=(16, 16))
    expected = _naive_mask_apply([mask], data)

    dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16), num_partitions=2)

    _run_mask_test_program(lt_ctx, dataset, mask, expected)

def test_endian(lt_ctx, TYPE):
    data = np.random.choice(a=0xFFFF, size=(16, 16, 16, 16)).astype(">u2")
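    # ">u2" is big-endian; the result must match the native-byte-order reference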
    mask = _mk_random(size=(16, 16))
    expected = _naive_mask_apply([mask], data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)

    _run_mask_test_program(lt_ctx, dataset, mask, expected, TYPE)
def test_sum_fft_analysis_defaults(lt_ctx):
    data = np.zeros([3 * 3, 8, 8]).astype(np.float32)
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 8, 8),
                            num_partitions=2,
                            sig_dims=2)
    analysis = SumfftAnalysis(dataset=dataset, parameters={})
    lt_ctx.run(analysis)

def test_mask_uint(lt_ctx, TYPE):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    mask = _mk_random(size=(16, 16)).astype("uint16")
    expected = _naive_mask_apply([mask], data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)

    _run_mask_test_program(lt_ctx, dataset, mask, expected, TYPE)
def test_ssb_rotate():
    ctx = lt.Context(executor=InlineJobExecutor())
    dtype = np.float64

    scaling = 4
    det = 45
    shape = (29, 30, det, det)

    # The acceleration voltage U in keV
    U = 300
    # STEM pixel size in m, here 50 STEM pixels on 0.5654 nm
    dpix = 0.5654 / 50 * 1e-9
    # STEM semiconvergence angle in radians
    semiconv = 25e-3
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = 78.6649 / scaling

    cy = det // 2
    cx = det // 2

    input_data = (np.random.uniform(0, 1, np.prod(shape)) *
                  np.linspace(1.0, 1000.0, num=np.prod(shape)))
    input_data = input_data.astype(np.float64).reshape(shape)

    data_90deg = np.zeros_like(input_data)

    # Rotate 90 degrees clockwise
    for y in range(det):
        for x in range(det):
            data_90deg[:, :, x, det - 1 - y] = input_data[:, :, y, x]
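    # (equivalent to data_90deg = np.rot90(input_data, k=-1, axes=(2, 3)))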

    udf = SSB_UDF(U=U,
                  dpix=dpix,
                  semiconv=semiconv,
                  semiconv_pix=semiconv_pix,
                  dtype=dtype,
                  center=(cy, cx),
                  transformation=rotate_deg(-90.))

    dataset = MemoryDataSet(
        data=data_90deg,
        tileshape=(20, shape[2], shape[3]),
        num_partitions=2,
        sig_dims=2,
    )

    result = ctx.run_udf(udf=udf, dataset=dataset)

    result_f, _ = reference_ssb(input_data,
                                U=U,
                                dpix=dpix,
                                semiconv=semiconv,
                                semiconv_pix=semiconv_pix,
                                cy=cy,
                                cx=cx)

    assert np.allclose(result['pixels'].data, result_f)
def test_ssb(dpix, backend, n_threads):
    lt_ctx = lt.Context(InlineJobExecutor(debug=True, inline_threads=n_threads))
    try:
        if backend == 'cupy':
            set_use_cuda(0)
        dtype = np.float64

        scaling = 4
        shape = (29, 30, 189 // scaling, 197 // scaling)

        # The acceleration voltage U in keV
        U = 300
        lamb = wavelength(U)

        # STEM semiconvergence angle in radians
        semiconv = 25e-3
        # Diameter of the primary beam in the diffraction pattern in pixels
        semiconv_pix = 78.6649 / scaling

        cy = 93 // scaling
        cx = 97 // scaling

        input_data = (
            np.random.uniform(0, 1, np.prod(shape))
            * np.linspace(1.0, 1000.0, num=np.prod(shape))
        )
        input_data = input_data.astype(np.float64).reshape(shape)

        udf = SSB_UDF(lamb=lamb, dpix=dpix, semiconv=semiconv, semiconv_pix=semiconv_pix,
                    dtype=dtype, cy=cy, cx=cx, method='subpix')

        dataset = MemoryDataSet(
            data=input_data, tileshape=(20, shape[2], shape[3]), num_partitions=2, sig_dims=2,
        )

        result = lt_ctx.run_udf(udf=udf, dataset=dataset)

        result_f, reference_masks = reference_ssb(input_data, U=U, dpix=dpix, semiconv=semiconv,
                                semiconv_pix=semiconv_pix, cy=cy, cx=cx)

        task_data = udf.get_task_data()

        udf_masks = task_data['masks'].computed_masks

        half_y = shape[0] // 2 + 1
        # Use symmetry and reshape like generate_masks()
        reference_masks = reference_masks[:half_y].reshape((half_y*shape[1], shape[2], shape[3]))

        print(np.max(np.abs(udf_masks.todense() - reference_masks)))

        print(np.max(np.abs(result['fourier'].data - result_f)))

        assert np.allclose(result['fourier'].data, result_f)
        backwards = result['amplitude'].data**2 * np.exp(1j*result['phase'].data)
        assert np.allclose(result['fourier'].data, np.fft.fft2(backwards))
    finally:
        if backend == 'cupy':
            set_use_cpu(0)
def test_negative_sync_offset(lt_ctx):
    udf = SumSigUDF()
    data = _mk_random(size=(8, 8, 8, 8))
    sync_offset = -2

    ds_with_offset = MemoryDataSet(
        data=data,
        tileshape=(2, 8, 8),
        num_partitions=4,
        sync_offset=sync_offset,
    )

    p0 = next(ds_with_offset.get_partitions())
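    # with sync_offset = -2, frame 0 of this dataset maps to frame -2 of the
    # underlying data, so the first partition starts at a negative frame index: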
    assert p0._start_frame == -2
    assert p0.slice.origin == (0, 0, 0)

    tileshape = Shape((2, ) + tuple(ds_with_offset.shape.sig),
                      sig_dims=ds_with_offset.shape.sig.dims)
    tiling_scheme = TilingScheme.make_for_shape(
        tileshape=tileshape,
        dataset_shape=ds_with_offset.shape,
    )

    for p in ds_with_offset.get_partitions():
        for t in p.get_tiles(tiling_scheme=tiling_scheme):
            pass

    assert p.slice.origin == (48, 0, 0)
    assert p.slice.shape[0] == 16

    ds_with_no_offset = MemoryDataSet(
        data=data,
        tileshape=(2, 8, 8),
        num_partitions=4,
        sync_offset=0,
    )
    result = lt_ctx.run_udf(dataset=ds_with_no_offset, udf=udf)
    result = result['intensity'].raw_data[
        :ds_with_no_offset._meta.image_count - abs(sync_offset)]

    result_with_offset = lt_ctx.run_udf(dataset=ds_with_offset, udf=udf)
    result_with_offset = result_with_offset['intensity'].raw_data[
        abs(sync_offset):]

    assert np.allclose(result, result_with_offset)
def test_get_tiles_by_partition_w_coords(lt_ctx, benchmark):
    data = utils._mk_random(size=(64, 64, 64, 64), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(4, 64, 64),
                            num_partitions=2,
                            sig_dims=2)

    test = Test_UDF_w_set_coords()
    benchmark(lt_ctx.run_udf, udf=test, dataset=dataset)

def test_com_fails_with_non_4d_data_1(lt_ctx):
    data = _mk_random(size=(16 * 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=32,
    )
    with pytest.raises(Exception):
        lt_ctx.create_com_analysis(dataset=dataset, cx=0, cy=0, mask_radius=8)
def test_run_each_partition_2(dask_executor):
    data = _mk_random(size=(16, 16, 16), dtype='<u2')
    dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16), num_partitions=16)
    partitions = dataset.get_partitions()

    i = 0
    for result in dask_executor.run_each_partition(partitions, lambda p: False, all_nodes=True):
        i += 1
    assert i == 0  # memory dataset doesn't have a defined location, so fn is never run

def test_with_progress_bar(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    pixelsum = PixelsumUDF()
    res = lt_ctx.run_udf(dataset=dataset, udf=pixelsum, progress=True)

def test_correlation_methods(lt_ctx, cls, dtype, kwargs):
    shape = np.array([128, 128])
    zero = shape / 2 + np.random.uniform(-1, 1, size=2)
    a = np.array([27.17, 0.]) + np.random.uniform(-1, 1, size=2)
    b = np.array([0., 29.19]) + np.random.uniform(-1, 1, size=2)
    indices = np.mgrid[-2:3, -2:3]
    indices = np.concatenate(indices.T)

    radius = 8

    data, indices, peaks = cbed_frame(*shape, zero, a, b, indices, radius)

    dataset = MemoryDataSet(data=data, tileshape=(1, *shape),
                            num_partitions=1, sig_dims=2)

    template = m.radial_gradient(
        centerX=radius+1,
        centerY=radius+1,
        imageSizeX=2*radius+2,
        imageSizeY=2*radius+2,
        radius=radius
    )

    match_patterns = [
        common.patterns.RadialGradient(radius=radius),
        common.patterns.Circular(radius=radius),
        common.patterns.BackgroundSubtraction(radius=radius),
        common.patterns.RadialGradientBackgroundSubtraction(radius=radius),
        common.patterns.UserTemplate(template=template)
    ]

    print("zero: ", zero)
    print("a: ", a)
    print("b: ", b)

    for match_pattern in match_patterns:
        print("refining using template %s" % type(match_pattern))
        if cls is udf.correlation.SparseCorrelationUDF and kwargs.get('zero_shift'):
            with pytest.raises(ValueError):
                m_udf = cls(match_pattern=match_pattern, peaks=peaks.astype(dtype), **kwargs)
        else:
            m_udf = cls(match_pattern=match_pattern, peaks=peaks.astype(dtype), **kwargs)
            res = lt_ctx.run_udf(dataset=dataset, udf=m_udf)
            print(peaks)
            print(res['refineds'].data[0])
            print(peaks - res['refineds'].data[0])
            print(res['peak_values'].data[0])
            print(res['peak_elevations'].data[0])

            # import matplotlib.pyplot as plt
            # fig, ax = plt.subplots()
            # plt.imshow(data[0])
            # for p in np.flip(res['refineds'].data[0], axis=-1):
            #     ax.add_artist(plt.Circle(p, radius, fill=False, color='y'))
            # plt.show()

            assert np.allclose(res['refineds'].data[0], peaks, atol=0.5)

def test_sum_with_crop_frames(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data, tileshape=(7, 8, 8),
                            num_partitions=2, sig_dims=2)
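    # tileshape (7, 8, 8) is smaller than the 16x16 frames, so each frame is
    # delivered as several cropped tiles; the summed result must be unaffected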

    analysis = lt_ctx.create_sum_analysis(dataset=dataset)
    res = lt_ctx.run(analysis)
    print(data.shape, res.intensity.raw_data.shape)
    assert np.allclose(res.intensity.raw_data, np.sum(data, axis=(0, 1)))
def test_get_tiles_by_frame_w_coords_roi(lt_ctx, benchmark):
    data = utils._mk_random(size=(64, 64, 64, 64), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(4, 64, 64),
                            num_partitions=2,
                            sig_dims=2)
    roi = np.random.choice([True, False], dataset.shape.nav, p=[0.9, 0.1])
    test = Test_UDF_frame_coords()
    benchmark(lt_ctx.run_udf, udf=test, dataset=dataset, roi=roi)
def test_tiles_no_offset(lt_ctx):
    data = _mk_random(size=(8, 8, 8, 8), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(4, 8, 8),
                            num_partitions=2,
                            sig_dims=2)

    test = SimpleTestByTileZeroSyncOffsetUDF()
    lt_ctx.run_udf(dataset=dataset, udf=test)