Example #1
def test_ssb():
    ctx = lt.Context(executor=InlineJobExecutor())
    dtype = np.float64

    scaling = 4
    shape = (29, 30, 189 // scaling, 197 // scaling)

    # The acceleration voltage U in keV
    U = 300
    # STEM pixel size in m, here 50 STEM pixels on 0.5654 nm
    dpix = 0.5654 / 50 * 1e-9
    # STEM semiconvergence angle in radians
    semiconv = 25e-3
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = 78.6649 / scaling

    cy = 93 // scaling
    cx = 97 // scaling

    input_data = np.random.uniform(0, 1, shape)
    LG = np.linspace(1.0, 1000.0, num=np.prod(shape)).reshape(shape)

    input_data = input_data * LG
    input_data = input_data.astype(np.float64)

    udf = SSB_UDF(U=U,
                  dpix=dpix,
                  semiconv=semiconv,
                  semiconv_pix=semiconv_pix,
                  dtype=dtype,
                  cy=cy,
                  cx=cx)

    dataset = MemoryDataSet(
        data=input_data,
        tileshape=(20, shape[2], shape[3]),
        num_partitions=2,
        sig_dims=2,
    )

    result = ctx.run_udf(udf=udf, dataset=dataset)

    result_f, _, _ = reference_ssb(input_data,
                                   U=U,
                                   dpix=dpix,
                                   semiconv=semiconv,
                                   semiconv_pix=semiconv_pix,
                                   cy=cy,
                                   cx=cx)

    assert np.allclose(np.abs(result['pixels']), np.abs(result_f))
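
Later variants of this test (see Example #3) pass the electron wavelength
lamb instead of the acceleration voltage U. As a minimal sketch, here is the
relativistically corrected electron wavelength for U given in kV as above;
the library's own wavelength() helper may differ in detail:

import numpy as np
import scipy.constants as const

def wavelength_m(U_kV):
    # lambda = h / sqrt(2 * m0 * e * U * (1 + e * U / (2 * m0 * c**2)))
    U = U_kV * 1e3  # acceleration voltage in volts
    e, m0, h, c = const.e, const.m_e, const.h, const.c
    return h / np.sqrt(2 * m0 * e * U * (1 + e * U / (2 * m0 * c**2)))

# For U = 300 kV this gives about 1.97e-12 m, consistent with the
# lamb=1.96e-12 passed in Example #15.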
Example #2
def test_ssb_rotate():
    ctx = lt.Context(executor=InlineJobExecutor())
    dtype = np.float64

    scaling = 4
    det = 45
    shape = (29, 30, det, det)

    # The acceleration voltage U in keV
    U = 300
    # STEM pixel size in m, here 50 STEM pixels on 0.5654 nm
    dpix = 0.5654 / 50 * 1e-9
    # STEM semiconvergence angle in radians
    semiconv = 25e-3
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = 78.6649 / scaling

    cy = det // 2
    cx = det // 2

    input_data = (np.random.uniform(0, 1, np.prod(shape)) *
                  np.linspace(1.0, 1000.0, num=np.prod(shape)))
    input_data = input_data.astype(np.float64).reshape(shape)

    data_90deg = np.zeros_like(input_data)

    # Rotate 90 degrees clockwise
    for y in range(det):
        for x in range(det):
            data_90deg[:, :, x, det - 1 - y] = input_data[:, :, y, x]

    udf = SSB_UDF(U=U,
                  dpix=dpix,
                  semiconv=semiconv,
                  semiconv_pix=semiconv_pix,
                  dtype=dtype,
                  center=(cy, cx),
                  transformation=rotate_deg(-90.))

    dataset = MemoryDataSet(
        data=data_90deg,
        tileshape=(20, shape[2], shape[3]),
        num_partitions=2,
        sig_dims=2,
    )

    result = ctx.run_udf(udf=udf, dataset=dataset)

    result_f, _ = reference_ssb(input_data,
                                U=U,
                                dpix=dpix,
                                semiconv=semiconv,
                                semiconv_pix=semiconv_pix,
                                cy=cy,
                                cx=cx)

    assert np.allclose(result['pixels'].data, result_f)
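
The explicit double loop in this example rotates the detector frames 90
degrees clockwise. A vectorized equivalent, as a sketch: the index mapping
data_90deg[:, :, x, det - 1 - y] = input_data[:, :, y, x] corresponds to
np.rot90 with k=-1 applied to the two signal axes:

import numpy as np

data_90deg = np.rot90(input_data, k=-1, axes=(2, 3))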
Example #3
def test_ssb(dpix, backend, n_threads):
    lt_ctx = lt.Context(InlineJobExecutor(debug=True, inline_threads=n_threads))
    try:
        if backend == 'cupy':
            set_use_cuda(0)
        dtype = np.float64

        scaling = 4
        shape = (29, 30, 189 // scaling, 197 // scaling)

        # The acceleration voltage U in keV
        U = 300
        lamb = wavelength(U)

        # STEM semiconvergence angle in radians
        semiconv = 25e-3
        # Diameter of the primary beam in the diffraction pattern in pixels
        semiconv_pix = 78.6649 / scaling

        cy = 93 // scaling
        cx = 97 // scaling

        input_data = (
            np.random.uniform(0, 1, np.prod(shape))
            * np.linspace(1.0, 1000.0, num=np.prod(shape))
        )
        input_data = input_data.astype(np.float64).reshape(shape)

        udf = SSB_UDF(lamb=lamb, dpix=dpix, semiconv=semiconv, semiconv_pix=semiconv_pix,
                      dtype=dtype, cy=cy, cx=cx, method='subpix')

        dataset = MemoryDataSet(
            data=input_data, tileshape=(20, shape[2], shape[3]), num_partitions=2, sig_dims=2,
        )

        result = lt_ctx.run_udf(udf=udf, dataset=dataset)

        result_f, reference_masks = reference_ssb(input_data, U=U, dpix=dpix, semiconv=semiconv,
                                                  semiconv_pix=semiconv_pix, cy=cy, cx=cx)

        task_data = udf.get_task_data()

        udf_masks = task_data['masks'].computed_masks

        half_y = shape[0] // 2 + 1
        # Use symmetry and reshape like generate_masks()
        reference_masks = reference_masks[:half_y].reshape((half_y*shape[1], shape[2], shape[3]))

        print(np.max(np.abs(udf_masks.todense() - reference_masks)))

        print(np.max(np.abs(result['fourier'].data - result_f)))

        assert np.allclose(result['fourier'].data, result_f)
        backwards = result['amplitude'].data**2 * np.exp(1j*result['phase'].data)
        assert np.allclose(result['fourier'].data, np.fft.fft2(backwards))
    finally:
        if backend == 'cupy':
            set_use_cpu(0)
Example #4
def test_start_local_cupyonly(hdf5_ds_1):
    cudas = detect()['cudas']
    # Make sure we have enough partitions
    hdf5_ds_1.set_num_cores(len(cudas))
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)

    spec = cluster_spec(cpus=(), cudas=cudas, has_cupy=True)
    with DaskJobExecutor.make_local(spec=spec) as executor:
        ctx = api.Context(executor=executor)
        # Uses ApplyMasksUDF, which supports CuPy
        analysis = ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                            factories=[lambda: mask])
        results = ctx.run(analysis)
        udf_res = ctx.run_udf(udf=DebugDeviceUDF(), dataset=hdf5_ds_1)
        # No CPU compute resources
        with pytest.raises(RuntimeError):
            _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('numpy', )),
                            dataset=hdf5_ds_1)
        cuda_res = ctx.run_udf(udf=DebugDeviceUDF(backends=('cuda', )),
                               dataset=hdf5_ds_1)

    assert np.allclose(results.mask_0.raw_data, expected)

    found = {}

    for val in udf_res['device_id'].data[0].values():
        print(val)
        # no CPU
        assert val["cpu"] is None
        # Register which GPUs got work
        found[val["cuda"]] = True

    for val in cuda_res['device_id'].data[0].values():
        print(val)
        # no CPU
        assert val["cpu"] is None
        # Register which GPUs got work
        found[val["cuda"]] = True

    for val in udf_res['backend'].data[0].values():
        # use CuPy
        print(val)
        assert 'cupy' in val

    for val in cuda_res['backend'].data[0].values():
        # no CuPy, i.e. NumPy
        print(val)
        assert 'numpy' in val

    # Check that each GPU got work. Whether this always holds
    # depends on the scheduler behavior.
    assert set(found.keys()) == set(cudas)

    assert np.all(udf_res['device_class'].data == 'cuda')
    assert np.allclose(udf_res['on_device'].data, data.sum(axis=(0, 1)))
Example #5
def ipy_ctx():
    import ipyparallel
    client = ipyparallel.Client()
    # wait for two engines: see also docker-compose.yml where the engines are started
    client.wait_for_engines(2)
    dask_client = client.become_dask()
    executor = DaskJobExecutor(client=dask_client, is_local=False)
    with lt.Context(executor=executor) as ctx:
        yield ctx
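
ipy_ctx is a pytest fixture: the Context is created once, handed to the
test, and closed afterwards by the "with" block. A minimal sketch of
consuming it, where some_dataset is a hypothetical dataset fixture:

def test_sum_on_ipyparallel(ipy_ctx, some_dataset):
    analysis = ipy_ctx.create_sum_analysis(dataset=some_dataset)
    result = ipy_ctx.run(analysis)
    assert result.intensity.raw_data.shape == tuple(some_dataset.shape.sig)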
Example #6
def test_ssb_roi():
    ctx = lt.Context(executor=InlineJobExecutor())
    dtype = np.float64

    scaling = 4
    shape = (29, 30, 189 // scaling, 197 // scaling)

    # The acceleration voltage U in keV
    U = 300
    lamb = wavelength(U)
    # STEM pixel size in m, here 50 STEM pixels on 0.5654 nm
    dpix = 0.5654 / 50 * 1e-9
    # STEM semiconvergence angle in radians
    semiconv = 25e-3
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = 78.6649 / scaling

    cy = 93 // scaling
    cx = 97 // scaling

    input_data = (np.random.uniform(0, 1, np.prod(shape)) *
                  np.linspace(1.0, 1000.0, num=np.prod(shape)))
    input_data = input_data.astype(np.float64).reshape(shape)

    udf = SSB_UDF(lamb=lamb,
                  dpix=dpix,
                  semiconv=semiconv,
                  semiconv_pix=semiconv_pix,
                  dtype=dtype,
                  cy=cy,
                  cx=cx)

    dataset = MemoryDataSet(
        data=input_data,
        tileshape=(20, shape[2], shape[3]),
        num_partitions=2,
        sig_dims=2,
    )

    roi_1 = np.random.choice([True, False], shape[:2])
    roi_2 = np.invert(roi_1)

    result_1 = ctx.run_udf(udf=udf, dataset=dataset, roi=roi_1)
    result_2 = ctx.run_udf(udf=udf, dataset=dataset, roi=roi_2)

    result_f, _ = reference_ssb(input_data,
                                U=U,
                                dpix=dpix,
                                semiconv=semiconv,
                                semiconv_pix=semiconv_pix,
                                cy=cy,
                                cx=cx)

    assert np.allclose(result_1['pixels'].data + result_2['pixels'].data,
                       result_f)
Example #7
def test_avoid_calculating_masks_on_client_udf(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))
    # We have to start a local cluster so that the masks are
    # computed in a different process
    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(
            dataset=hdf5_ds_1, factories=[lambda: mask], mask_count=1, mask_dtype=np.float32
        )
        udf = analysis.get_udf()
        ctx.run_udf(udf=udf, dataset=hdf5_ds_1)
        assert udf._mask_container is None
Example #8
def test_avoid_calculating_masks_on_client(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))

    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                            factories=[lambda: mask],
                                            mask_count=1,
                                            mask_dtype=np.float32)
        job = analysis.get_job()
        ctx.run(job)
        assert job.masks._computed_masks is None
Example #9
def test_start_local(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)

    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                            factories=[lambda: mask])
        results = ctx.run(analysis)

    assert np.allclose(results.mask_0.raw_data, expected)
Example #10
def dist_ctx(scheduler_addr):
    """
    This Context requires an external dask cluster to be running, with the
    following assumptions:

     - two workers: hostnames worker-1 and worker-2
     - one scheduler node
     - data availability TBD
    """
    executor = DaskJobExecutor.connect(scheduler_addr)
    with lt.Context(executor=executor) as ctx:
        yield ctx
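
A sketch of how the external cluster assumed by this fixture could be
launched with the dask.distributed command line tools (command names as in
older distributed releases; the address is an assumption and must match
scheduler_addr):

# On the scheduler node:
#     dask-scheduler --port 8786
# On worker-1 and worker-2:
#     dask-worker tcp://scheduler:8786
# The fixture then connects with:
executor = DaskJobExecutor.connect('tcp://scheduler:8786')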
Example #11
def ipy_ctx():
    import ipyparallel
    client = ipyparallel.Client()
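    # wait up to 10 seconds for at least one engine to register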
    retries = 10
    while retries > 0:
        retries -= 1
        if len(client.ids) > 0:
            break
        time.sleep(1)
    dask_client = client.become_dask()
    executor = DaskJobExecutor(client=dask_client, is_local=False)
    with lt.Context(executor=executor) as ctx:
        yield ctx
Example #12
def __init__(self, path, dtype, scan_size, detector_size, warmup_rounds,
             roi, mask):
    super().__init__(path, dtype, scan_size, detector_size, warmup_rounds,
                     roi, mask)
    self.ctx = lt.Context()
    self.ds = self.ctx.load(
        'raw',
        path=path,
        dtype=dtype,
        scan_size=scan_size,
        detector_size_raw=detector_size,
        crop_detector_to=detector_size,
    )
    self.boolean_mask = self.mask == 1
Example #13
def dist_ctx():
    """
    This Context requires an external dask cluster to be running, with the
    following assumptions:

     - two workers: hostnames worker-1 and worker-2
     - one scheduler node
     - data availability TBD
     - the address of the dask scheduler is passed in as DASK_SCHEDULER_ADDRESS
    """
    scheduler_addr = os.environ['DASK_SCHEDULER_ADDRESS']
    executor = DaskJobExecutor.connect(scheduler_addr)
    with lt.Context(executor=executor) as ctx:
        yield ctx
Example #14
def test_difftodect_com_flip_rot_scale(dim):
    lt_ctx = lt.Context(InlineJobExecutor())
    data_shape = (2, 2, dim, dim)
    data = np.zeros(data_shape)
    data[0, 0, 7, 7] = 1
    data[0, 1, 7, 8] = 1
    data[1, 1, 8, 8] = 1
    data[1, 0, 8, 7] = 1
    source_shape = data_shape[2:]
    target_shape = data_shape[2:]

    f = diffraction_to_detector(lamb=1,
                                diffraction_shape=target_shape,
                                pixel_size_real=1,
                                pixel_size_detector=4 / np.array(target_shape),
                                cy=source_shape[0] / 2,
                                cx=source_shape[1] / 2,
                                flip_y=True,
                                scan_rotation=-90.)
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=f,
    )
    transformed_data = apply_matrix(data, m, target_shape)
    ds = lt_ctx.load('memory', data=data, sig_dims=2)
    transformed_ds = lt_ctx.load('memory', data=transformed_data, sig_dims=2)
    com_a = lt_ctx.create_com_analysis(dataset=ds,
                                       mask_radius=np.inf,
                                       flip_y=True,
                                       scan_rotation=-90.,
                                       cy=target_shape[0] / 2,
                                       cx=target_shape[1] / 2)

    com_res = lt_ctx.run(com_a)

    trans_com_a = lt_ctx.create_com_analysis(dataset=transformed_ds,
                                             mask_radius=np.inf,
                                             flip_y=False,
                                             scan_rotation=0.,
                                             cy=target_shape[0] / 2,
                                             cx=target_shape[1] / 2)
    trans_com_res = lt_ctx.run(trans_com_a)
    print(com_res.field.raw_data)
    print(trans_com_res.field.raw_data)

    assert np.allclose(com_res.field.raw_data,
                       np.array(trans_com_res.field.raw_data) / 4)
Example #15
def main():
    path = r'C:\Users\lesnic\Nextcloud\Dieter\cGaN_sim_300kV\DPs\CBED_MSAP.raw'
    shape = (50, 50, 189, 189)
    ctx = lt.Context(executor=InlineJobExecutor())
    data_s = ctx.load(
        "raw", path=path, dtype="float32",
        scan_size=shape[:2], detector_size=shape[-2:]
    )
    udf = SSB_UDF(dpix=0.5654/50*1e-9, semiconv=25e-3, semiconv_pix=78.6649, lamb=1.96e-12)
    result = ctx.run_udf(udf=udf, dataset=data_s)
    fig, axes = plt.subplots(1, 2)
    axes[0].imshow(np.abs(result['pixelsum']), norm=LogNorm())
    axes[1].imshow(np.angle(np.fft.ifft2(result['pixelsum'])))

    input("press return to continue")
Example #16
def main():
    ctx = api.Context(executor=InlineJobExecutor())

    ds = RawFileDataSet(path="/home/clausen/Data/EMPAD/scan_11_x256_y256.raw",
                        scan_size=(256, 256),
                        detector_size_raw=(130, 128),
                        crop_detector_to=(128, 128),
                        tileshape=(1, 8, 128, 128),
                        dtype="float32")
    ds.initialize()

    job = ctx.create_mask_analysis(dataset=ds,
                                   factories=[lambda: np.ones(ds.shape.sig)])

    result = ctx.run(job)
Example #17
def default_raw_asymm(tmpdir_factory, default_raw_data):
    lt_ctx = lt.Context(executor=InlineJobExecutor())
    datadir = tmpdir_factory.mktemp('data')
    filename = datadir + '/raw-test-default'
    default_raw_data.tofile(str(filename))
    del default_raw_data
    ds = lt_ctx.load(
        "raw",
        path=str(filename),
        dtype="float32",
        nav_shape=(14, 17),
        sig_shape=(128, 128),
        io_backend=MMapBackend(),
    )
    ds.set_num_cores(2)
    yield ds
Example #18
def buffered_raw(tmpdir_factory, default_raw_data):
    lt_ctx = lt.Context(executor=InlineJobExecutor())
    datadir = tmpdir_factory.mktemp('data')
    filename = datadir + '/raw-test-buffered'
    default_raw_data.tofile(str(filename))
    del default_raw_data

    ds = lt_ctx.load(
        "raw",
        path=str(filename),
        dtype="float32",
        nav_shape=(16, 16),
        sig_shape=(128, 128),
        io_backend=BufferedBackend(),
    )
    yield ds
Example #19
def test_start_local_default(hdf5_ds_1):
    mask = _mk_random(size=(16, 16))
    d = detect()
    cudas = d['cudas']
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)

    with api.Context() as ctx:
        analysis = ctx.create_mask_analysis(dataset=hdf5_ds_1,
                                            factories=[lambda: mask])
        # Based on ApplyMasksUDF, which is CuPy-enabled
        hybrid = ctx.run(analysis)
        _ = ctx.run_udf(udf=DebugDeviceUDF(), dataset=hdf5_ds_1)
        _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy', 'numpy')),
                        dataset=hdf5_ds_1)
        _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cuda', 'numpy')),
                        dataset=hdf5_ds_1)
        _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy', 'cuda', 'numpy')),
                        dataset=hdf5_ds_1)
        if cudas:
            cuda_only = ctx.run_udf(udf=DebugDeviceUDF(backends=('cuda',
                                                                 'numpy')),
                                    dataset=hdf5_ds_1,
                                    backends=('cuda', ))
            if d['has_cupy']:
                cupy_only = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy',
                                                                     'numpy')),
                                        dataset=hdf5_ds_1,
                                        backends=('cupy', ))
            else:
                with pytest.raises(RuntimeError):
                    cupy_only = ctx.run_udf(
                        udf=DebugDeviceUDF(backends=('cupy', 'numpy')),
                        dataset=hdf5_ds_1,
                        backends=('cupy', ))
                cupy_only = None

        numpy_only = ctx.run_udf(udf=DebugDeviceUDF(backends=('numpy', )),
                                 dataset=hdf5_ds_1)

    assert np.allclose(hybrid.mask_0.raw_data, expected)
    if cudas:
        assert np.all(cuda_only['device_class'].data == 'cuda')
        if cupy_only is not None:
            assert np.all(cupy_only['device_class'].data == 'cuda')
    assert np.all(numpy_only['device_class'].data == 'cpu')
Example #20
def main():
    ctx = api.Context(executor=InlineJobExecutor())

    ds = RawFilesDataSet(
        path="/home/clausen/Data/many_small_files/frame00016293.bin",
        # path="/home/clausen/Data/many_medsize_files/frame00000001.bin",
        nav_shape=(256, 256),
        sig_shape=(128, 128),
        file_shape=(16, 128, 128),
        tileshape=(1, 8, 128, 128),
        dtype="float32")
    ds.initialize()
    pprint.pprint(list(ds.get_partitions()))

    job = ctx.create_mask_analysis(dataset=ds,
                                   factories=[lambda: np.ones(ds.shape.sig)])

    result = ctx.run(job)
Example #21
def test_start_local_cpuonly(hdf5_ds_1):
    # We don't use all available CPUs, since that might be too many
    cpus = (0, 1)
    hdf5_ds_1.set_num_cores(len(cpus))
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)

    spec = cluster_spec(cpus=cpus, cudas=(), has_cupy=False)
    with DaskJobExecutor.make_local(spec=spec) as executor:
        ctx = api.Context(executor=executor)
        analysis = ctx.create_mask_analysis(
            dataset=hdf5_ds_1, factories=[lambda: mask]
        )
        results = ctx.run(analysis)
        udf_res = ctx.run_udf(udf=DebugDeviceUDF(), dataset=hdf5_ds_1)
        # No CuPy resources
        with pytest.raises(RuntimeError):
            _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy',)), dataset=hdf5_ds_1)

    assert np.allclose(
        results.mask_0.raw_data,
        expected
    )
    found = {}

    for val in udf_res['device_id'].data[0].values():
        print(val)
        # no CUDA
        assert val["cuda"] is None
        found[val["cpu"]] = True

    for val in udf_res['backend'].data[0].values():
        print(val)
        # no CUDA
        assert 'numpy' in val

    # Check that each CPU got work. Whether this always holds
    # depends on the scheduler behavior.
    assert set(found.keys()) == set(cpus)

    assert np.all(udf_res['device_class'].data == 'cpu')
    assert np.allclose(udf_res['on_device'].data, data.sum(axis=(0, 1)))
Example #22
def test_use_plain_dask(hdf5_ds_1):
    # We deactivate the resource scheduling and run on a plain dask cluster
    hdf5_ds_1.set_num_cores(2)
    mask = _mk_random(size=(16, 16))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]
        expected = _naive_mask_apply([mask], data)
    with dd.LocalCluster(n_workers=2, threads_per_worker=1) as cluster:
        client = dd.Client(cluster, set_as_default=False)
        try:
            executor = DaskJobExecutor(client=client)
            ctx = api.Context(executor=executor)
            analysis = ctx.create_mask_analysis(
                dataset=hdf5_ds_1, factories=[lambda: mask]
            )
            results = ctx.run(analysis)
            udf_res = ctx.run_udf(udf=DebugDeviceUDF(), dataset=hdf5_ds_1)
            # Requesting CuPy, which is not available
            with pytest.raises(RuntimeError):
                _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy',)), dataset=hdf5_ds_1)
        finally:
            # to fix "distributed.client - ERROR - Failed to reconnect to scheduler after 10.00 seconds, closing client"  # NOQA
            client.close()

    assert np.allclose(
        results.mask_0.raw_data,
        expected
    )

    for val in udf_res['device_id'].data[0].values():
        print(val)
        # no CUDA
        assert val["cuda"] is None
        # Default without worker setup
        assert val["cpu"] == 0

    for val in udf_res['backend'].data[0].values():
        print(val)
        # no CUDA
        assert 'numpy' in val

    assert np.all(udf_res['device_class'].data == 'cpu')
    assert np.allclose(udf_res['on_device'].data, data.sum(axis=(0, 1)))
Example #23
def test_preload(hdf5_ds_1):
    # We don't use all available CPUs, since that might be too many
    cpus = (0, 1)
    hdf5_ds_1.set_num_cores(len(cpus))

    class CheckEnvUDF(NoOpUDF):
        def process_tile(self, tile):
            assert os.environ['LT_TEST_1'] == 'hello'
            assert os.environ['LT_TEST_2'] == 'world'

    preloads = (
        "import os; os.environ['LT_TEST_1'] = 'hello'",
        "import os; os.environ['LT_TEST_2'] = 'world'",
    )

    spec = cluster_spec(cpus=cpus, cudas=(), has_cupy=False, preload=preloads)
    with DaskJobExecutor.make_local(spec=spec) as executor:
        ctx = api.Context(executor=executor)
        ctx.run_udf(udf=CheckEnvUDF(), dataset=hdf5_ds_1)
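
The preload entries are plain Python source strings. Conceptually, and as an
assumption based on this test's assertions rather than the exact mechanism,
each worker process executes them once at startup, before any UDF runs:

for code in preloads:
    exec(code)  # e.g. sets LT_TEST_1 / LT_TEST_2 in the worker's environment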
Example #24
def main():
    with api.Context() as ctx:
        ds = ctx.load("RAW",
                      path=r"C:\Users\Dieter\testfile-32-32-32-32-float32.raw",
                      nav_shape=(32, 32),
                      sig_shape=(32, 32),
                      dtype=np.float32)

        sum_analysis = ctx.create_sum_analysis(dataset=ds)
        sum_result = ctx.run(sum_analysis)

        sum_image = DM.CreateImage(sum_result.intensity.raw_data.copy())
        sum_image.ShowImage()

        haadf_analysis = ctx.create_ring_analysis(dataset=ds)
        haadf_result = ctx.run(haadf_analysis)

        haadf_image = DM.CreateImage(haadf_result.intensity.raw_data.copy())
        haadf_image.ShowImage()
Example #25
def main():
    with DaskJobExecutor.connect('tcp://localhost:8786') as executor:
        ctx = api.Context(executor=executor)
        ds = ctx.load("RAW",
                      path=r"C:\Users\Dieter\testfile-32-32-32-32-float32.raw",
                      nav_shape=(32, 32),
                      sig_shape=(32, 32),
                      dtype=np.float32)

        sum_analysis = ctx.create_sum_analysis(dataset=ds)
        sum_result = ctx.run(sum_analysis)

        sum_image = DM.CreateImage(sum_result.intensity.raw_data.copy())
        sum_image.ShowImage()

        haadf_analysis = ctx.create_ring_analysis(dataset=ds)
        haadf_result = ctx.run(haadf_analysis)

        haadf_image = DM.CreateImage(haadf_result.intensity.raw_data.copy())
        haadf_image.ShowImage()
Example #26
def test_start_local_cudaonly(hdf5_ds_1):
    cudas = detect()['cudas']
    # Make sure we have enough partitions
    hdf5_ds_1.set_num_cores(len(cudas))
    with hdf5_ds_1.get_reader().get_h5ds() as h5ds:
        data = h5ds[:]

    spec = cluster_spec(cpus=(), cudas=cudas, has_cupy=False)
    with DaskJobExecutor.make_local(spec=spec) as executor:
        ctx = api.Context(executor=executor)
        udf_res = ctx.run_udf(udf=DebugDeviceUDF(backends=('cuda', )),
                              dataset=hdf5_ds_1)
        # No CPU compute resources
        with pytest.raises(RuntimeError):
            _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('numpy', )),
                            dataset=hdf5_ds_1)
        # No ndarray (CuPy) resources
        with pytest.raises(RuntimeError):
            _ = ctx.run_udf(udf=DebugDeviceUDF(backends=('cupy', )),
                            dataset=hdf5_ds_1)

    found = {}

    for val in udf_res['device_id'].data[0].values():
        print(val)
        # no CPU
        assert val["cpu"] is None
        # Register which GPUs got work
        found[val["cuda"]] = True

    for val in udf_res['backend'].data[0].values():
        print(val)
        # CUDA, but no CuPy, i.e. use NumPy
        assert 'numpy' in val

    # Check that each GPU got work. Whether this always holds
    # depends on the scheduler behavior.
    assert set(found.keys()) == set(cudas)

    assert np.all(udf_res['device_class'].data == 'cuda')
    assert np.allclose(udf_res['on_device'].data, data.sum(axis=(0, 1)))
Example #27
def auto_ctx(doctest_namespace):
    ctx = lt.Context(executor=InlineJobExecutor())
    doctest_namespace["ctx"] = ctx
Example #28
def get_inline_context():
    return api.Context(executor=InlineJobExecutor())
Example #29
def get_context():
    executor = Registry.get_component('libertem_executor')
    return api.Context(executor=executor.ensure_sync())
Example #30
import logging
import sys

import numpy as np
import matplotlib.pyplot as plt

from libertem import api

logging.basicConfig(level=logging.WARNING)

# Protect the entry point.
# LiberTEM uses dask, which uses multiprocessing to
# start worker processes.
# https://docs.python.org/3/library/multiprocessing.html
if __name__ == '__main__':

    # api.Context() starts a new local cluster.
    # The "with" clause makes sure we shut it down in the end.
    with api.Context() as ctx:
        try:
            path = sys.argv[1]
        except IndexError:
            path = ('C:/Users/weber/Nextcloud/Projects/'
                    'Open Pixelated STEM framework/Data/EMPAD/'
                    'scan_11_x256_y256.emd')
        ds = ctx.load('hdf5',
                      path=path,
                      ds_path='experimental/science_data/data',
                      tileshape=(1, 8, 128, 128))

        (scan_y, scan_x, detector_y, detector_x) = ds.shape
        mask_shape = (detector_y, detector_x)

        # LiberTEM sends functions that create the masks
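        # (The example is truncated here. What follows is a sketch of a
        # plausible continuation, modeled on the mask analyses elsewhere in
        # this collection; it is not the original code.)
        def make_mask():
            return np.ones(mask_shape)

        analysis = ctx.create_mask_analysis(dataset=ds, factories=[make_mask])
        result = ctx.run(analysis)

        plt.imshow(result.mask_0.raw_data)
        plt.show()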