Example 1
def test_hdf5_apply_masks_1(lt_ctx, hdf5_ds_1, TYPE):
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)
    analysis = lt_ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                           factories=[lambda: mask])
    analysis.TYPE = TYPE
    results = lt_ctx.run(analysis)

    assert np.allclose(results.mask_0.raw_data, expected)
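
These examples use two test helpers that are not shown here: _mk_random and _naive_mask_apply. Below is a minimal sketch of assumed reference implementations; the real ones live in the project's test utilities, and the details here are illustrative only.

import numpy as np

def _mk_random(size, dtype="float32"):
    # random array of the requested shape and dtype
    dtype = np.dtype(dtype)
    if dtype.kind == 'c':
        # complex dtypes: random real and imaginary parts
        return (np.random.uniform(size=size)
                + 1j * np.random.uniform(size=size)).astype(dtype)
    return np.random.uniform(size=size).astype(dtype)

def _naive_mask_apply(masks, data):
    # reference result: dot each mask with each frame of the
    # (scan_y, scan_x, det_y, det_x) data, giving one image per mask
    res = np.zeros((len(masks),) + data.shape[:2], dtype=np.result_type(data, np.float64))
    for n, mask in enumerate(masks):
        # densify scipy.sparse / sparse.COO masks first
        m = np.asarray(mask.todense()) if hasattr(mask, 'todense') else np.asarray(mask)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                res[n, i, j] = data[i, j].ravel().dot(m.ravel())
    return res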
Example 2
def test_multi_mask_stack_force_scipy_sparse(lt_ctx, TYPE):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    masks = _mk_random(size=(2, 16, 16))
    expected = _naive_mask_apply(masks, data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset, factories=lambda: masks, use_sparse='scipy.sparse', mask_count=2
    )
    analysis.TYPE = TYPE
    results = lt_ctx.run(analysis)

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
Example 3
def test_com_default_params(lt_ctx, TYPE):
    data = _mk_random(size=(16, 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=16,
        sig_dims=2,
    )
    analysis = lt_ctx.create_com_analysis(dataset=dataset)
    analysis.TYPE = TYPE
    lt_ctx.run(analysis)
Example 4
def test_sum_with_crop_frames(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data,
                            tileshape=(7, 8, 8),
                            num_partitions=2,
                            sig_dims=2)

    analysis = lt_ctx.create_sum_analysis(dataset=dataset)
    res = lt_ctx.run(analysis)
    print(data.shape, res.intensity.raw_data.shape)
    assert np.allclose(res.intensity.raw_data, np.sum(data, axis=(0, 1)))
Example 5
def test_get_macrotile():
    data = _mk_random(size=(16, 16, 16, 16))
    dataset = MemoryDataSet(
        data=data,
        tileshape=(16, 16, 16),
        num_partitions=2,
    )

    p = next(dataset.get_partitions())
    mt = p.get_macrotile()
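    # 16 * 16 = 256 frames over 2 partitions: each macrotile stacks 128 frames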
    assert tuple(mt.shape) == (128, 16, 16)
Example 6
def test_com_fails_with_non_4d_data_1(lt_ctx):
    data = _mk_random(size=(16 * 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=32,
    )
    with pytest.raises(Exception):
        lt_ctx.create_com_analysis(
            dataset=dataset, cx=0, cy=0, mask_radius=8
        )
Example 7
def test_sum_endian(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype='>u2')
    dataset = MemoryDataSet(data=data, tileshape=(8, 16, 16), num_partitions=32)
    expected = data.sum(axis=(0, 1))

    analysis = lt_ctx.create_sum_analysis(dataset=dataset)

    results = lt_ctx.run(analysis)

    assert results['intensity'].raw_data.shape == (16, 16)
    assert np.allclose(results['intensity'].raw_data, expected)
Example 8
def test_aux_2(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    aux_data = EchoUDF.aux_data(
        kind="nav",
        dtype="float32",
        extra_shape=(2, ),
        data=_mk_random(size=(16, 16, 2), dtype="float32"),
    )
    dataset = MemoryDataSet(data=data,
                            tileshape=(7, 16, 16),
                            num_partitions=2,
                            sig_dims=2)

    echo_udf = EchoUDF(aux=aux_data)
    res = lt_ctx.run_udf(dataset=dataset, udf=echo_udf)
    assert 'weighted' in res
    print(data.shape, res['weighted'].data.shape)
    assert np.allclose(
        res['weighted'].raw_data,
        np.sum(data, axis=(2, 3)).reshape(-1) * aux_data.raw_data[..., 0])
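
The EchoUDF above is another test helper. Below is a hypothetical sketch consistent with the assertion, assuming it scales each frame sum by the first component of the per-frame aux data; the real class is defined in the test suite.

import numpy as np
from libertem.udf import UDF

class EchoUDF(UDF):
    # illustrative sketch only, not the actual implementation
    def get_result_buffers(self):
        # one scalar result per navigation position
        return {
            'weighted': self.buffer(kind="nav", dtype="float32"),
        }

    def process_frame(self, frame):
        # self.params.aux is automatically sliced to the current frame,
        # giving a (2,) vector here because of extra_shape=(2,)
        self.results.weighted[:] = np.sum(frame) * self.params.aux[0]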
Example 9
def test_com_linescan(lt_ctx):
    data = _mk_random(size=(1, 16, 16, 16))
    dataset = MemoryDataSet(
        data=data.astype("<u2"),
        tileshape=(1, 16, 16),
        num_partitions=4,
    )
    analysis = lt_ctx.create_com_analysis(
        dataset=dataset, cx=0, cy=0, mask_radius=8
    )
    lt_ctx.run(analysis)
Example 10
def test_all_sparse_analysis(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="<u2")
    mask0 = sp.csr_matrix(_mk_random(size=(16, 16)))
    mask1 = sparse.COO.from_numpy(_mk_random(size=(16, 16)))
    expected = _naive_mask_apply([mask0, mask1], data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(
        dataset=dataset, factories=[lambda: mask0, lambda: mask1]
    )
    results = lt_ctx.run(analysis)

    assert np.allclose(
        results.mask_0.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1.raw_data,
        expected[1],
    )
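
The sparse masks above assume module-level imports of scipy.sparse as sp and of the pydata/sparse package as sparse.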
Example 11
def test_avoid_calculating_masks_on_client(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))
    # We have to start a local cluster so that the masks are
    # computed in a different process
    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(
            dataset=hdf5_ds_1, factories=[lambda: mask], mask_count=1, mask_dtype=np.float32
        )
        job = analysis.get_job()
        ctx.run(job)
        assert job.masks._computed_masks is None
Example 12
def test_multi_mask_autodtype_complex(lt_ctx, TYPE):
    data = _mk_random(size=(16, 16, 16, 16), dtype="complex64")
    masks = _mk_random(size=(2, 16, 16))
    expected = _naive_mask_apply(masks, data)

    dataset = MemoryDataSet(data=data, tileshape=(4 * 4, 4, 4), num_partitions=2)
    analysis = lt_ctx.create_mask_analysis(dataset=dataset, factories=lambda: masks)
    analysis.TYPE = TYPE
    results = lt_ctx.run(analysis)

    assert results.mask_0_complex.raw_data.dtype.kind == 'c'
    assert results.mask_0_complex.raw_data.dtype == np.complex64

    assert np.allclose(
        results.mask_0_complex.raw_data,
        expected[0],
    )
    assert np.allclose(
        results.mask_1_complex.raw_data,
        expected[1],
    )
Example 13
def test_chunked(lt_ctx, tmpdir_factory, chunks):
    datadir = tmpdir_factory.mktemp('data')
    filename = os.path.join(datadir, 'hdf5-test-chunked.h5')
    data = _mk_random((16, 16, 16, 16), dtype=np.float32)

    with h5py.File(filename, "w") as f:
        f.create_dataset("data", data=data, chunks=chunks)

    ds = lt_ctx.load("hdf5", path=filename)
    udf = PixelsumUDF()
    res = lt_ctx.run_udf(udf=udf, dataset=ds)['pixelsum']
    assert np.allclose(res, np.sum(data, axis=(2, 3)))
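
PixelsumUDF, used here and in the ROI example below, is again a test helper. A minimal sketch matching the assertions, assuming it sums each frame over the signal axes (the real class is defined in the test suite):

import numpy as np
from libertem.udf import UDF

class PixelsumUDF(UDF):
    # illustrative sketch only, not the actual implementation
    def get_result_buffers(self):
        # one frame sum per navigation position
        return {
            'pixelsum': self.buffer(kind="nav", dtype="float32"),
        }

    def process_frame(self, frame):
        self.results.pixelsum[:] = np.sum(frame)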
Example 14
def test_pick_analysis_via_api_3_3d_ds_fail_5(lt_ctx):
    data = _mk_random(size=(16, 256, 16, 16))
    dataset = MemoryDataSet(
        data=data,
        tileshape=(1, 16, 16),
        num_partitions=2,
        sig_dims=2
    )

    analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8, "z": 11})
    with pytest.raises(ValueError):
        lt_ctx.run(analysis)
Example 15
def test_roi_all_ones(lt_ctx):
    data = _mk_random(size=(16, 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data, tileshape=(3, 16, 16),
                            num_partitions=16, sig_dims=2)
    mask = np.ones(data.shape[:2], dtype=bool)

    pixelsum = PixelsumUDF()
    res = lt_ctx.run_udf(dataset=dataset, udf=pixelsum, roi=mask)
    assert 'pixelsum' in res
    print(data.shape, res['pixelsum'].data.shape)
    expected = np.sum(data[mask, ...], axis=(-1, -2))
    assert np.allclose(res['pixelsum'].raw_data, expected)
Example 16
def test_start_local(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)

    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                            factories=[lambda: mask])
        results = ctx.run(analysis)

    assert np.allclose(results.mask_0.raw_data, expected)
Example 17
def test_smoke(lt_ctx, progress):
    """
    just check if the analysis runs without throwing exceptions:
    """
    data = _mk_random(size=(16 * 16, 16, 16), dtype="float32")
    dataset = MemoryDataSet(data=data, tileshape=(1, 16, 16),
                            num_partitions=2, sig_dims=2)
    match_pattern = common.patterns.RadialGradient(radius=4)
    udf.correlation.run_blobfinder(
        ctx=lt_ctx, dataset=dataset, num_peaks=1, match_pattern=match_pattern,
        progress=progress
    )