Code Example #1
def test_kernel(target):
    if target == pystencils.Target.GPU:
        pytest.importorskip('waLBerla.cuda')

    # 3D
    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, default_target=target)
    kernel_execution_jacobi(dh, target)
    reduction(dh)

    # 2D
    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1), cellsPerBlock=(3, 2, 1), oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, dim=2, default_target=target)
    kernel_execution_jacobi(dh, target)
    reduction(dh)
Code Example #2
def test_block_iteration():
    block_size = (16, 16, 16)
    num_blocks = (2, 2, 2)
    blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2)

    for b in dh.iterate():
        b['v'].fill(1)

    s = 0
    for b in dh.iterate():
        s += np.sum(b['v'])

    # 2x2x2 blocks of 16^3 cells, each stored with 2 ghost layers -> 8 local arrays of 20^3 cells, i.e. 40^3 values in total
    assert s == 40*40*40

    sl = make_slice[0:18, 0:18, 0:18]
    for b in dh.iterate(slice_obj=sl):
        b['v'].fill(0)

    s = 0
    for b in dh.iterate():
        s += np.sum(b['v'])

    assert s == 40*40*40 - 20*20*20
Code Example #3
def test_gpu():
    if not hasattr(wlb, 'cuda'):
        print("Skip GPU tests because walberla was built without CUDA")
        return

    block_size = (4, 7, 1)
    num_blocks = (3, 2, 1)
    blocks = wlb.createUniformBlockGrid(blocks=num_blocks,
                                        cellsPerBlock=block_size,
                                        oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
    dh.add_array('v',
                 values_per_cell=3,
                 dtype=np.int64,
                 ghost_layers=2,
                 gpu=True)

    # fill the CPU copies, transfer them to the GPU, overwrite the CPU copies,
    # then copy back: the values written before the transfer must survive the round trip
    for b in dh.iterate():
        b['v'].fill(42)
    dh.all_to_gpu()
    for b in dh.iterate():
        b['v'].fill(0)
    dh.to_cpu('v')
    for b in dh.iterate():
        np.testing.assert_equal(b['v'], 42)
Code Example #4
def test_data_handling_2d():
    print("--- LDC 2D test ---")
    results = []
    for parallel in [True, False] if parallel_available else [False]:
        for gpu in [True, False] if gpu_available else [False]:
            if parallel and gpu and not hasattr(wLB, 'cuda'):
                continue

            print(f"Testing parallel: {parallel}\tgpu: {gpu}")
            config = CreateKernelConfig(
                target=Target.GPU if gpu else Target.CPU,
                gpu_indexing_params=MappingProxyType({'block_size':
                                                      (8, 4, 2)}))
            if parallel:
                from pystencils.datahandling import ParallelDataHandling
                blocks = wLB.createUniformBlockGrid(blocks=(2, 3, 1),
                                                    cellsPerBlock=(5, 5, 1),
                                                    oneBlockPerProcess=False)
                dh = ParallelDataHandling(blocks, dim=2)
                rho = ldc_setup(data_handling=dh, config=config)
                results.append(rho)
            else:
                rho = ldc_setup(domain_size=(10, 15),
                                parallel=False,
                                config=config)
                results.append(rho)
    for i, arr in enumerate(results[1:]):
        print(f"Testing equivalence version 0 with version {i + 1}")
        np.testing.assert_almost_equal(results[0], arr)
Code Example #5
File: __init__.py  Project: mabau/pystencils
def create_data_handling(domain_size: Tuple[int, ...],
                         periodicity: Union[bool, Tuple[bool, ...]] = False,
                         default_layout: str = 'SoA',
                         default_target: Target = Target.CPU,
                         parallel: bool = False,
                         default_ghost_layers: int = 1) -> DataHandling:
    """Creates a data handling instance.

    Args:
        domain_size: size of the rectangular domain
        periodicity: either True or False for full or no periodicity, or a tuple of booleans indicating
                     periodicity for each coordinate
        default_layout: default array layout that is used if not explicitly specified in 'add_array'
        default_target: `Target`
        parallel: if True a parallel domain is created using walberla - each MPI process gets a part of the domain
        default_ghost_layers: default number of ghost layers if not overwritten in 'add_array'
    """
    if isinstance(default_target, str):
        new_target = Target[default_target.upper()]
        warnings.warn(
            f'Target "{default_target}" as str is deprecated. Use {new_target} instead',
            category=DeprecationWarning)
        default_target = new_target

    if parallel:
        if wlb is None:
            raise ValueError(
                "Cannot create parallel data handling because walberla module is not available"
            )

        if periodicity is False or periodicity is None:
            periodicity = (0, 0, 0)
        elif periodicity is True:
            periodicity = (1, 1, 1)
        else:
            periodicity = tuple(int(bool(x)) for x in periodicity)
            if len(periodicity) == 2:
                periodicity += (1, )

        if len(domain_size) == 2:
            dim = 2
            domain_size = (domain_size[0], domain_size[1], 1)
        else:
            dim = 3

        # noinspection PyArgumentList
        block_storage = wlb.createUniformBlockGrid(cells=domain_size,
                                                   periodic=periodicity)
        return ParallelDataHandling(blocks=block_storage,
                                    dim=dim,
                                    default_target=default_target,
                                    default_layout=default_layout,
                                    default_ghost_layers=default_ghost_layers)
    else:
        return SerialDataHandling(domain_size,
                                  periodicity=periodicity,
                                  default_target=default_target,
                                  default_layout=default_layout,
                                  default_ghost_layers=default_ghost_layers)
Code Example #6
def test_access_and_gather():
    block_size = (4, 7, 1)
    num_blocks = (3, 2, 1)
    cells = tuple(a * b for a, b in zip(block_size, num_blocks))
    blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False,
                                        periodic=(1, 1, 1))
    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
    access_and_gather(dh, cells)
    synchronization(dh, test_gpu=False)
    if hasattr(wlb, 'cuda'):
        synchronization(dh, test_gpu=True)
Code Example #7
def test_kernel():

    for gpu in (True, False):
        if gpu and not hasattr(wlb, 'cuda'):
            print(
                "Skipping CUDA tests because walberla was built without GPU support"
            )
            continue

        # 3D
        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4),
                                            cellsPerBlock=(3, 2, 5),
                                            oneBlockPerProcess=False)
        dh = ParallelDataHandling(blocks)
        kernel_execution_jacobi(dh, test_gpu=gpu)
        reduction(dh)

        # 2D
        blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 1),
                                            cellsPerBlock=(3, 2, 1),
                                            oneBlockPerProcess=False)
        dh = ParallelDataHandling(blocks, dim=2)
        kernel_execution_jacobi(dh, test_gpu=gpu)
        reduction(dh)
Code Example #8
def test_gpu():
    pytest.importorskip('waLBerla.cuda')

    block_size = (4, 7, 1)
    num_blocks = (3, 2, 1)
    blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, default_ghost_layers=2)
    dh.add_array('v', values_per_cell=3, dtype=np.int64, ghost_layers=2, gpu=True)

    for b in dh.iterate():
        b['v'].fill(42)
    dh.all_to_gpu()
    for b in dh.iterate():
        b['v'].fill(0)
    dh.to_cpu('v')
    for b in dh.iterate():
        np.testing.assert_equal(b['v'], 42)
Code Example #9
def test_getter_setter():
    pytest.importorskip('waLBerla.cuda')

    block_size = (2, 2, 2)
    num_blocks = (2, 2, 2)
    blocks = wlb.createUniformBlockGrid(blocks=num_blocks, cellsPerBlock=block_size, oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks, default_ghost_layers=2, default_target=pystencils.Target.GPU)
    dh.add_array('v', values_per_cell=1, dtype=np.int64, ghost_layers=2, gpu=True)

    assert dh.shape == (4, 4, 4)
    assert dh.periodicity == (False, False, False)
    assert dh.values_per_cell('v') == 1
    assert dh.has_data('v') is True
    assert 'v' in dh.array_names
    dh.log_on_root()
    assert dh.is_root is True
    assert dh.world_rank == 0

    dh.to_gpu('v')
    assert dh.is_on_gpu('v') is True
    dh.all_to_cpu()
Code Example #10
def test_vtk_output():
    blocks = wlb.createUniformBlockGrid(blocks=(3, 2, 4), cellsPerBlock=(3, 2, 5), oneBlockPerProcess=False)
    dh = ParallelDataHandling(blocks)
    vtk_output(dh)