コード例 #1
0
def test_weird_partition_shapes_1_fast(lt_ctx):
    """Mask analysis on a dataset whose partitions cut through the signal dims."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 8, 16, 16),
        partition_shape=(16, 16, 8, 8),
    )

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)

    # the first tile of the first partition must be cropped to the partition
    first_partition = next(ds.get_partitions())
    first_tile = next(first_partition.get_tiles())
    assert tuple(first_tile.tile_slice.shape) == (1, 8, 8, 8)
コード例 #2
0
ファイル: test_base.py プロジェクト: ozej8y/LiberTEM
def test_sweep_stackheight():
    """Iterate every tile for each stackheight from 1 through 255."""
    raw = _mk_random(size=(16, 16, 16, 16))
    for sh in range(1, 256):
        print("testing with stackheight", sh)
        ds = MemoryDataSet(
            data=raw.astype("<u2"),
            tileshape=(sh, 16, 16),
            num_partitions=2,
        )
        # exhaust all tiles; we only care that tiling doesn't crash
        for partition in ds.get_partitions():
            for _tile in partition.get_tiles():
                pass
コード例 #3
0
def test_numerics_succeed(lt_ctx):
    """Forcing float64 mask_dtype preserves precision at detector-scale input.

    One nav position is bumped by the full dynamic range so the summed
    result only matches when enough precision is carried through.
    """
    dtype = 'float64'
    # Highest expected detector resolution
    RESOLUTION = 4096
    # Highest expected detector dynamic range
    RANGE = 1e6
    # default value for all cells
    VAL = 1.1

    frames = np.full((2, 2, RESOLUTION, RESOLUTION), VAL, dtype=np.float32)
    frames[0, 0, 0, 0] += VAL * RANGE
    ds = MemoryDataSet(
        data=frames,
        tileshape=(2, RESOLUTION, RESOLUTION),
        num_partitions=2,
        sig_dims=2,
    )
    flat_mask = np.ones((RESOLUTION, RESOLUTION), dtype=np.float32)
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=[lambda: flat_mask],
        mask_count=1,
        mask_dtype=dtype,
    )

    results = lt_ctx.run(analysis)
    per_frame = VAL * RESOLUTION**2
    expected = np.array([[
        [per_frame + VAL * RANGE, per_frame],
        [per_frame, per_frame],
    ]])
    naive = _naive_mask_apply([flat_mask.astype(dtype)], frames.astype(dtype))

    assert np.allclose(expected, naive)
    assert np.allclose(expected[0], results.mask_0.raw_data)
コード例 #4
0
def test_override_mask_dtype(lt_ctx):
    """mask_dtype= overrides the dtype the masks are applied with."""
    mask_dtype = np.float32
    frames = _mk_random(size=(16, 16, 16, 16), dtype=mask_dtype)
    mask_stack = _mk_random(size=(2, 16, 16), dtype=np.float64)
    reference = _naive_mask_apply(mask_stack.astype(mask_dtype), frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4 * 4, 4, 4),
        num_partitions=2,
    )
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=lambda: mask_stack,
        mask_dtype=mask_dtype,
        mask_count=len(mask_stack),
    )
    results = lt_ctx.run(analysis)

    # the override must show up in the result dtype
    assert results.mask_0.raw_data.dtype == mask_dtype

    assert np.allclose(results.mask_0.raw_data, reference[0])
    assert np.allclose(results.mask_1.raw_data, reference[1])
コード例 #5
0
def test_multi_mask_force_dtype(lt_ctx):
    """dtype= forces the whole computation into the given dtype."""
    force_dtype = np.dtype(np.int32)
    frames = _mk_random(size=(16, 16, 16, 16), dtype="int16")
    mask_stack = _mk_random(size=(2, 16, 16), dtype="bool")
    reference = _naive_mask_apply(
        mask_stack.astype(force_dtype),
        frames.astype(force_dtype),
    )

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4 * 4, 4, 4),
        num_partitions=2,
    )
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=lambda: mask_stack,
        dtype=force_dtype,
    )
    results = lt_ctx.run(analysis)

    # both kind and exact dtype must match the forced dtype
    assert results.mask_0.raw_data.dtype.kind == force_dtype.kind
    assert results.mask_0.raw_data.dtype == force_dtype

    assert np.allclose(results.mask_0.raw_data, reference[0])
    assert np.allclose(results.mask_1.raw_data, reference[1])
コード例 #6
0
def test_bad_merge(lt_ctx):
    """
    Test bad example of updating buffer
    """
    frames = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 16, 16),
        partition_shape=(4, 16, 16),
        sig_dims=2,
    )

    def make_buffers():
        return {'pixelsum': BufferWrapper(kind="nav", dtype="float32")}

    def frame_fn(frame, pixelsum):
        pixelsum[:] = np.sum(frame)

    def broken_merge(dest, src):
        # bad, because it just sets a key in dest, it doesn't copy over the data to dest
        dest['pixelsum'] = src['pixelsum']

    # rebinding the result key instead of copying into it must be rejected
    with pytest.raises(TypeError):
        lt_ctx.run_udf(
            dataset=ds,
            fn=frame_fn,
            merge=broken_merge,
            make_buffers=make_buffers,
        )
コード例 #7
0
ファイル: test_analysis_raw.py プロジェクト: FWin22/LiberTEM
def test_pick_analysis_via_api_3_3d_ds_fail_4(lt_ctx):
    """PickFrameAnalysis on a 6D dataset fails for every coordinate subset."""
    raw = _mk_random(size=(16, 16, 16, 16, 16, 16))
    ds = MemoryDataSet(
        data=raw,
        tileshape=(1, 1, 16, 16, 16),
        partition_shape=(16, 16, 16, 16),
        effective_shape=(16, 16, 16, 16, 16, 16),
        sig_dims=2,
    )

    # no coordinates, x only, x+y, and x+y+z all leave nav dims unresolved
    for params in ({}, {"x": 7}, {"x": 7, "y": 8}, {"x": 7, "y": 8, "z": 11}):
        analysis = PickFrameAnalysis(dataset=ds, parameters=params)
        with pytest.raises(AssertionError):
            lt_ctx.run(analysis)
コード例 #8
0
ファイル: conftest.py プロジェクト: FWin22/LiberTEM
def ds_complex():
    """Dataset fixture with complex64 values drawn from a small fixed set."""
    values = np.random.choice(
        a=[0, 1, 0 + 1j, 0 - 1j, 1 + 1j, 1 - 1j],
        size=(16, 16, 16, 16),
    ).astype('complex64')
    return MemoryDataSet(
        data=values,
        tileshape=(1, 1, 16, 16),
        partition_shape=(16, 16, 16, 16),
    )
コード例 #9
0
ファイル: test_dask.py プロジェクト: ozej8y/LiberTEM
async def test_fd_limit(aexecutor):
    """Run many pick jobs under a tight file-descriptor limit.

    Lowers the soft RLIMIT_NOFILE to slightly above current usage and
    then runs 32 jobs; if job runs leaked descriptors, one of the
    iterations would hit the limit and raise.
    """
    import resource
    import psutil
    # set soft limit, throws errors but allows to raise it
    # again afterwards:
    proc = psutil.Process()
    oldlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    # headroom of 24 fds above what is currently open; hard limit unchanged
    resource.setrlimit(resource.RLIMIT_NOFILE, (proc.num_fds() + 24, oldlimit[1]))

    print("fds", proc.num_fds())

    try:
        data = _mk_random(size=(1, 16, 16), dtype='<u2')
        dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16), num_partitions=1)

        # pick the single frame of the dataset
        slice_ = Slice(origin=(0, 0, 0), shape=Shape((1, 16, 16), sig_dims=2))
        job = PickFrameJob(dataset=dataset, slice_=slice_)

        # 32 runs > 24 fds of headroom: a per-run fd leak would blow the limit
        for i in range(32):
            print(i)
            print(proc.num_fds())
            async for tiles in aexecutor.run_job(job):
                pass
    finally:
        # always restore the original limit, even if the loop raised
        resource.setrlimit(resource.RLIMIT_NOFILE, oldlimit)
コード例 #10
0
def test_kind_single(lt_ctx):
    """
    Test buffer type kind='single'

    Parameters
    ----------
    lt_ctx
        Context class for loading dataset and creating jobs on them
    """
    frames = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 2, 16, 16),
        partition_shape=(4, 4, 16, 16),
        sig_dims=2,
    )

    def make_buffers():
        # a single scalar shared across the whole dataset
        return {'counter': BufferWrapper(kind="single", dtype="uint32")}

    def on_frame(frame, counter):
        # in-place increment of the shared buffer view
        counter += 1

    def merge(dest, src):
        dest['counter'][:] += src['counter']

    res = lt_ctx.run_udf(
        dataset=ds,
        fn=on_frame,
        make_buffers=make_buffers,
        merge=merge,
    )
    # one count per frame: 16 x 16 nav positions
    assert 'counter' in res
    assert res['counter'].data.shape == (1, )
    assert res['counter'].data == 16 * 16
コード例 #11
0
def test_multi_masks(lt_ctx):
    """Dense, scipy.sparse and sparse.COO masks may be mixed in one analysis."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    dense_mask = _mk_random(size=(16, 16))
    csr_mask = sp.csr_matrix(_mk_random(size=(16, 16)))
    coo_mask = sparse.COO.from_numpy(_mk_random(size=(16, 16)))
    reference = _naive_mask_apply([dense_mask, csr_mask, coo_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4 * 4, 4, 4),
        num_partitions=2,
    )
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=[lambda: dense_mask, lambda: csr_mask, lambda: coo_mask],
    )
    results = lt_ctx.run(analysis)

    assert np.allclose(results.mask_0.raw_data, reference[0])
    assert np.allclose(results.mask_1.raw_data, reference[1])
    assert np.allclose(results.mask_2.raw_data, reference[2])
コード例 #12
0
def test_simple_example(lt_ctx):
    """End-to-end check of the crystallinity analysis on synthetic frames."""
    # creating a dataset where 0:3 frames are strong crystalline, 3:6 frames are weak crystalline,
    #  6:9 frames are amorphous
    data = np.zeros([3 * 3, 5, 5]).astype(np.float32)
    # adding high intensity zero order peak for all frames
    data[:, 2, 2] = 7
    # adding strong non-zero order diffraction peaks for 0:3 frames
    data[0:3, 0, 0] = 2
    data[0:3, 4, 4] = 2
    # adding weak non-zero order diffraction peaks for 3:6 frames
    data[3:6, 2, 0] = 1
    data[3:6, 2, 4] = 1
    dataset = MemoryDataSet(data=data,
                            tileshape=(1, 5, 5),
                            num_partitions=3,
                            sig_dims=2)
    # integrate an annulus (rad_in..rad_out) in Fourier space, after masking
    # out the zero-order peak at real_center with radius real_rad
    result = crystal.run_analysis_crystall(ctx=lt_ctx,
                                           dataset=dataset,
                                           rad_in=0,
                                           rad_out=3,
                                           real_center=(2, 2),
                                           real_rad=0)
    # check if values of integration in Fourier space after deleting of zero order diffraction peaks
    #  are zeros for amorphous frames
    assert np.allclose(result['intensity'].data[6:9], np.zeros([3]))
    # check if values of integration in Fourier space after deleting of zero order diffraction peaks
    #  are NOT zeros for strong crystalline frames
    assert (result['intensity'].data[0:3] > np.zeros([3])).all()
    # check if values of integration in Fourier space after deleting of zero order diffraction peaks
    #  are NOT zeros for weak crystalline frames
    assert (result['intensity'].data[3:6] > np.zeros([3])).all()
    # check if values of integration in Fourier space after deleting of zero order diffraction peaks
    #  are higher for strong crystalline frames than for weak crystalline frames
    assert (result['intensity'].data[0:3] >
            result['intensity'].data[3:6]).all()
コード例 #13
0
def test_com_complex_numbers(lt_ctx):
    """Center-of-mass on complex64 data, compared per-frame against scipy."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="complex64")
    ds_complex = MemoryDataSet(
        data=frames,
        tileshape=(1, 16, 16),
        num_partitions=2,
    )
    analysis = lt_ctx.create_com_analysis(
        dataset=ds_complex,
        cx=0,
        cy=0,
        mask_radius=None,
    )
    results = lt_ctx.run(analysis)

    flat = ds_complex.data.reshape((16 * 16, 16, 16))
    # reassemble the complex COM fields from their real/imag result channels
    field_x = (results.x_real.raw_data + 1j * results.x_imag.raw_data).reshape((16 * 16))
    field_y = (results.y_real.raw_data + 1j * results.y_imag.raw_data).reshape((16 * 16))

    for idx in range(16 * 16):
        scy, scx = measurements.center_of_mass(flat[idx])

        print(scx, field_x[idx])

        # difference between scipy and our impl: we don't divide by zero
        if np.isinf(scx):
            assert field_x[idx] == 0
            assert field_y[idx] == 0
        else:
            assert np.allclose(scx, field_x[idx])
            assert np.allclose(scy, field_y[idx])
コード例 #14
0
def test_numerics(lt_ctx):
    """Mask analysis stays accurate at detector-scale float32 input.

    One nav position is bumped by the full dynamic range; the summed
    result only matches when the computation is accurate enough.
    """
    dtype = 'float32'
    # Highest expected detector resolution
    RESOLUTION = 4096
    # Highest expected detector dynamic range
    RANGE = 1e6
    # default value for all cells
    # The test fails for 1.1 using float32!
    VAL = 1.0

    frames = np.full((2, 2, RESOLUTION, RESOLUTION), VAL, dtype=dtype)
    frames[0, 0, 0, 0] += VAL * RANGE
    ds = MemoryDataSet(
        data=frames,
        tileshape=(2, RESOLUTION, RESOLUTION),
        num_partitions=2,
        sig_dims=2,
    )
    flat_mask = np.ones((RESOLUTION, RESOLUTION), dtype=dtype)
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=[lambda: flat_mask],
    )

    results = lt_ctx.run(analysis)
    per_frame = VAL * RESOLUTION**2
    expected = np.array([[
        [per_frame + VAL * RANGE, per_frame],
        [per_frame, per_frame],
    ]])
    naive = _naive_mask_apply([flat_mask], frames)

    assert np.allclose(expected, naive)
    assert np.allclose(expected[0], results.mask_0.raw_data)
コード例 #15
0
def test_sum_frames(lt_ctx):
    """
    Test sum over the pixels for 2-dimensional dataset

    Parameters
    ----------
    lt_ctx
        Context class for loading dataset and creating jobs on them

    """
    frames = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 1, 16, 16),
        partition_shape=(4, 4, 16, 16),
        sig_dims=2,
    )

    def make_buffers():
        return {'pixelsum': BufferWrapper(kind="nav", dtype="float32")}

    def on_frame(frame, pixelsum):
        pixelsum[:] = np.sum(frame)

    res = lt_ctx.run_udf(
        dataset=ds,
        fn=on_frame,
        make_buffers=make_buffers,
    )
    assert 'pixelsum' in res
    print(frames.shape, res['pixelsum'].data.shape)
    # per-frame sum must match summing over the signal axes
    assert np.allclose(res['pixelsum'].data, np.sum(frames, axis=(2, 3)))
コード例 #16
0
def ds_random():
    """Dataset fixture with random uint16 data split into two partitions."""
    raw = _mk_random(size=(16, 16, 16, 16))
    return MemoryDataSet(
        data=raw.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=2,
    )
コード例 #17
0
def test_single_frame_tiles(lt_ctx):
    """Mask test where each tile covers exactly one full frame."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 1, 16, 16),
        partition_shape=(16, 16, 16, 16),
    )

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)
コード例 #18
0
def test_mask_uint(lt_ctx):
    """Mask test with an unsigned integer mask."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    uint_mask = _mk_random(size=(16, 16)).astype("uint16")
    reference = _naive_mask_apply([uint_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4, 4, 4, 4),
        partition_shape=(16, 16, 16, 16),
    )

    _run_mask_test_program(lt_ctx, ds, uint_mask, reference)
コード例 #19
0
def test_signed(lt_ctx):
    """Mask test on signed 32-bit little-endian input data."""
    frames = np.random.choice(a=0xFFFF, size=(16, 16, 16, 16)).astype("<i4")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4, 4, 4, 4),
        partition_shape=(16, 16, 16, 16),
    )

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)
コード例 #20
0
def test_subframe_tiles_fast(lt_ctx):
    """Mask test with tiles smaller than a single frame."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(data=frames, tileshape=(8, 4, 4), num_partitions=2)

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)
コード例 #21
0
def test_masks_timeseries_2d_frames(lt_ctx):
    """Mask analysis on a 3D (time-series) dataset yields a flat nav result."""
    frames = _mk_random(size=(16 * 16, 16, 16), dtype="<u2")
    ds = MemoryDataSet(data=frames, tileshape=(2, 16, 16), num_partitions=2)
    single_mask = _mk_random(size=(16, 16))
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=[lambda: single_mask],
    )
    results = lt_ctx.run(analysis)
    assert results.mask_0.raw_data.shape == (256, )
コード例 #22
0
def test_weird_partition_shapes_1_fast(lt_ctx):
    """Mask test with partitions cut through the signal dimensions.

    XXX MemoryDataSet is now using Partition3D and so on, so we can't create
    partitions with weird shapes so easily anymore (in this case, partitioned in
    the signal dimensions). maybe fix this with a custom DataSet impl that
    simulates this?
    """
    frames = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(8, 16, 16),
        partition_shape=(16, 16, 8, 8),
    )

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)

    # the first tile of the first partition must be cropped to the partition
    first_partition = next(ds.get_partitions())
    first_tile = next(first_partition.get_tiles())
    assert tuple(first_tile.tile_slice.shape) == (1, 8, 8, 8)
コード例 #23
0
def test_com_fails_with_non_4d_data_1(lt_ctx):
    """COM analysis must reject a dataset without a 2D nav shape."""
    raw = _mk_random(size=(16 * 16, 16, 16))
    ds = MemoryDataSet(
        data=raw.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=32,
    )
    with pytest.raises(Exception):
        lt_ctx.create_com_analysis(dataset=ds, cx=0, cy=0, mask_radius=8)
コード例 #24
0
ファイル: test_analysis_com.py プロジェクト: FWin22/LiberTEM
def ds_w_zero_frame():
    """Dataset fixture where frame (0, 0) is all zeros."""
    raw = _mk_random(size=(16, 16, 16, 16))
    raw[0, 0] = np.zeros((16, 16))
    return MemoryDataSet(
        data=raw.astype("<u2"),
        tileshape=(1, 1, 16, 16),
        partition_shape=(8, 16, 16, 16)
    )
コード例 #25
0
def test_endian(lt_ctx):
    """Mask test on big-endian input data."""
    frames = np.random.choice(a=0xFFFF, size=(16, 16, 16, 16)).astype(">u2")
    single_mask = _mk_random(size=(16, 16))
    reference = _naive_mask_apply([single_mask], frames)

    ds = MemoryDataSet(
        data=frames,
        tileshape=(4 * 4, 4, 4),
        num_partitions=2,
    )

    _run_mask_test_program(lt_ctx, ds, single_mask, reference)
コード例 #26
0
def test_com_default_params(lt_ctx):
    """COM analysis runs with all optional parameters left at their defaults."""
    raw = _mk_random(size=(16, 16, 16, 16))
    ds = MemoryDataSet(
        data=raw.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=16,
        sig_dims=2,
    )
    analysis = lt_ctx.create_com_analysis(dataset=ds)
    lt_ctx.run(analysis)
コード例 #27
0
def test_pick_analysis_via_api_3_3d_ds_fail_5(lt_ctx):
    """Picking with a superfluous z coordinate must raise ValueError."""
    raw = _mk_random(size=(16, 256, 16, 16))
    ds = MemoryDataSet(
        data=raw,
        tileshape=(1, 16, 16),
        num_partitions=2,
        sig_dims=2
    )

    analysis = PickFrameAnalysis(dataset=ds, parameters={"x": 7, "y": 8, "z": 11})
    with pytest.raises(ValueError):
        lt_ctx.run(analysis)
コード例 #28
0
ファイル: test_analysis_raw.py プロジェクト: FWin22/LiberTEM
def test_get_single_frame(lt_ctx):
    """A pick job returns exactly the frame at the requested origin."""
    raw = _mk_random(size=(16, 16, 16, 16))
    ds = MemoryDataSet(
        data=raw,
        tileshape=(1, 1, 16, 16),
        partition_shape=(16, 16, 16, 16),
        sig_dims=2,
    )

    job = lt_ctx.create_pick_job(dataset=ds, origin=(7, 8))
    picked = lt_ctx.run(job)

    assert picked.shape == (16, 16)
    assert np.allclose(picked, raw[7, 8])
コード例 #29
0
def test_masks_hyperspectral(lt_ctx):
    """Mask analysis on a 5D dataset with three signal dimensions."""
    frames = _mk_random(size=(16, 16, 16, 16, 16), dtype="<u2")
    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 16, 16, 16),
        num_partitions=2,
        sig_dims=3,
    )
    volume_mask = _mk_random(size=(16, 16, 16))
    analysis = lt_ctx.create_mask_analysis(
        dataset=ds,
        factories=[lambda: volume_mask],
    )
    results = lt_ctx.run(analysis)
    # the result is shaped like the 2D nav grid
    assert results.mask_0.raw_data.shape == (16, 16)
コード例 #30
0
def test_sum_signed(lt_ctx):
    """Sum analysis over signed integer data matches a plain numpy sum."""
    frames = _mk_random(size=(16, 16, 16, 16), dtype='<i4')
    ds = MemoryDataSet(
        data=frames,
        tileshape=(1, 8, 16, 16),
        partition_shape=(1, 8, 16, 16),
    )
    reference = frames.sum(axis=(0, 1))

    analysis = lt_ctx.create_sum_analysis(dataset=ds)
    results = lt_ctx.run(analysis)

    assert results.intensity.raw_data.shape == (16, 16)
    assert np.allclose(results.intensity.raw_data, reference)