Code example #1
def test_generate_WKB(ctx_factory, grid_shape, proc_shape, dtype, random,
                      timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)

    L = (10,)*3
    volume = np.product(L)
    dk = tuple(2 * np.pi / Li for Li in L)
    modes = ps.RayleighGenerator(ctx, fft, dk, volume)

    # only checking that this call is successful
    fk, dfk = modes.generate_WKB(queue, random=random)

    if timing:
        ntime = 10
        from common import timer
        t = timer(lambda: modes.generate_WKB(queue, random=random), ntime=ntime)
        print(f"{random=} set_modes took {t:.3f} ms for {grid_shape=}")
Code example #2
File: test_histogram.py  Project: mfkiwl/pystella
def test_trivial_histogram(ctx_factory, grid_shape, proc_shape, dtype,
                           num_bins, _N, timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    grid_shape = (_N,)*3
    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    histograms = {
        "a": (13., 1),
        "b": (10.3, 2),
        "c": (100.9, 3),
    }
    hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape)

    result = hist(queue)

    for key, (_b, weight) in histograms.items():
        res = result[key]
        b = int(np.floor(_b))
        expected = weight * np.product(grid_shape)
        assert res[b] == expected, \
            f"{key}: result={res[b]}, {expected=}, ratio={res[b]/expected}"
        assert np.all(res[res != res[b]] == 0.)
Code example #3
File: test_reduction.py  Project: mfkiwl/pystella
def test_reduction(ctx_factory,
                   grid_shape,
                   proc_shape,
                   dtype,
                   op,
                   _grid_shape,
                   pass_grid_dims,
                   timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    grid_shape = _grid_shape or grid_shape
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    from pymbolic import var
    from pystella import Field
    tmp_insns = [(var("x"), Field("f") / 2 + .31)]

    reducers = {}
    reducers["avg"] = [(var("x"), op)]

    if pass_grid_dims:
        reducer = ps.Reduction(mpi,
                               reducers,
                               rank_shape=rank_shape,
                               tmp_instructions=tmp_insns,
                               grid_size=np.product(grid_shape))
    else:
        reducer = ps.Reduction(mpi, reducers, tmp_instructions=tmp_insns)

    f = clr.rand(queue, rank_shape, dtype=dtype)

    import pyopencl.tools as clt
    pool = clt.MemoryPool(clt.ImmediateAllocator(queue))

    result = reducer(queue, f=f, allocator=pool)
    avg = result["avg"]

    avg_test = reducer.reduce_array(f / 2 + .31, op)
    if op == "avg":
        avg_test /= np.product(grid_shape)

    rtol = 5e-14 if dtype == np.float64 else 1e-5
    assert np.allclose(avg, avg_test, rtol=rtol, atol=0), \
        f"{op} reduction innaccurate for {grid_shape=}, {proc_shape=}"

    if timing:
        from common import timer
        t = timer(lambda: reducer(queue, f=f, allocator=pool), ntime=1000)
        if mpi.rank == 0:
            print(
                f"reduction took {t:.3f} ms for {grid_shape=}, {proc_shape=}")
            bandwidth = f.nbytes / 1024**3 / t * 1000
            print(f"Bandwidth = {bandwidth:.1f} GB/s")
Code example #4
File: test_histogram.py  Project: mfkiwl/pystella
def test_histogram(ctx_factory, grid_shape, proc_shape, dtype, num_bins,
                   timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")):
        max_rtol, avg_rtol = 1e-10, 1e-11
    else:
        max_rtol, avg_rtol = 5e-4, 5e-5

    from pymbolic import var
    _fx = ps.Field("fx")
    histograms = {
        "count": (var("abs")(_fx) * num_bins, 1),
        "squared": (var("abs")(_fx) * num_bins, _fx**2),
    }
    hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape)

    rng = clr.ThreefryGenerator(ctx, seed=12321)
    fx = rng.uniform(queue, rank_shape, dtype)
    fx_h = fx.get()

    result = hist(queue, fx=fx)

    res = result["count"]
    assert np.sum(res.astype("int64")) == np.product(grid_shape), \
        f"Count histogram doesn't sum to grid_size ({np.sum(res)})"

    bins = np.linspace(0, 1, num_bins+1).astype(dtype)
    weights = np.ones_like(fx_h)
    np_res = np.histogram(fx_h, bins=bins, weights=weights)[0]
    np_res = mpi.allreduce(np_res)

    max_err, avg_err = get_errs(res, np_res)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"Histogrammer inaccurate for grid_shape={grid_shape}" \
        f": {max_err=}, {avg_err=}"

    res = result["squared"]
    np_res = np.histogram(fx_h, bins=bins, weights=fx_h**2)[0]
    np_res = mpi.allreduce(np_res)

    max_err, avg_err = get_errs(res, np_res)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"Histogrammer with weights inaccurate for grid_shape={grid_shape}" \
        f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        t = timer(lambda: hist(queue, fx=fx))
        print(f"histogram took {t:.3f} ms for {grid_shape=}, {dtype=}")
Code example #5
def test_field_statistics(ctx_factory,
                          grid_shape,
                          proc_shape,
                          dtype,
                          _grid_shape,
                          pass_grid_dims,
                          timing=False):
    ctx = ctx_factory()

    queue = cl.CommandQueue(ctx)
    h = 1
    grid_shape = _grid_shape or grid_shape
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    # make select parameters local for convenience
    h = 2
    f = clr.rand(queue, (2, 1) + tuple(ni + 2 * h for ni in rank_shape),
                 dtype=dtype)

    if pass_grid_dims:
        statistics = ps.FieldStatistics(mpi,
                                        h,
                                        rank_shape=rank_shape,
                                        grid_size=np.product(grid_shape))
    else:
        statistics = ps.FieldStatistics(mpi, h)

    import pyopencl.tools as clt
    pool = clt.MemoryPool(clt.ImmediateAllocator(queue))

    stats = statistics(f, allocator=pool)
    avg = stats["mean"]
    var = stats["variance"]

    f_h = f.get()
    rank_sum = np.sum(f_h[..., h:-h, h:-h, h:-h], axis=(-3, -2, -1))
    avg_test = mpi.allreduce(rank_sum) / np.product(grid_shape)

    rank_sum = np.sum(f_h[..., h:-h, h:-h, h:-h]**2, axis=(-3, -2, -1))
    var_test = mpi.allreduce(rank_sum) / np.product(grid_shape) - avg_test**2

    rtol = 5e-14 if dtype == np.float64 else 1e-5

    assert np.allclose(avg, avg_test, rtol=rtol, atol=0), \
        f"average innaccurate for {grid_shape=}, {proc_shape=}"

    assert np.allclose(var, var_test, rtol=rtol, atol=0), \
        f"variance innaccurate for {grid_shape=}, {proc_shape=}"

    if timing:
        from common import timer
        t = timer(lambda: statistics(f, allocator=pool))
        if mpi.rank == 0:
            print(
                f"field stats took {t:.3f} ms "
                f"for outer shape {f.shape[:-3]}, {grid_shape=}, {proc_shape=}"
            )
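Arguments such as `grid_shape`, `proc_shape`, `dtype`, and `pass_grid_dims` are injected by the test harness; pyopencl's `pytest_generate_tests_for_pyopencl` hook is what typically supplies `ctx_factory`. A sketch of one way the wiring might look, with illustrative parameter values:

import numpy as np
import pytest
from pyopencl.tools import (  # supplies the ctx_factory fixture
    pytest_generate_tests_for_pyopencl as pytest_generate_tests)

@pytest.mark.parametrize("dtype", [np.float64, np.float32])
@pytest.mark.parametrize("pass_grid_dims", [True, False])
def test_field_statistics_small(ctx_factory, dtype, pass_grid_dims):
    # Illustrative wrapper delegating to the test above with a small grid.
    test_field_statistics(ctx_factory, grid_shape=(32,)*3, proc_shape=(1, 1, 1),
                          dtype=dtype, _grid_shape=None,
                          pass_grid_dims=pass_grid_dims)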
Code example #6
def test_generate(ctx_factory, grid_shape, proc_shape, dtype, random, timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)

    num_bins = int(sum(Ni**2 for Ni in grid_shape)**.5 / 2 + .5) + 1
    L = (10,)*3
    volume = np.product(L)
    dk = tuple(2 * np.pi / Li for Li in L)
    spectra = ps.PowerSpectra(mpi, fft, dk, volume)
    modes = ps.RayleighGenerator(ctx, fft, dk, volume, seed=5123)

    kbins = min(dk) * np.arange(0, num_bins)
    test_norm = 1 / 2 / np.pi**2 / np.product(grid_shape)**2

    for exp in [-1, -2, -3]:
        def power(k):
            return k**exp

        fk = modes.generate(queue, random=random, norm=1, field_ps=power)

        spectrum = spectra.norm * spectra.bin_power(fk, queue=queue, k_power=3)[1:-1]
        true_spectrum = test_norm * kbins[1:-1]**3 * power(kbins[1:-1])
        err = np.abs(1 - spectrum / true_spectrum)

        tol = .1 if num_bins < 64 else .3
        assert (np.max(err[num_bins//3:-num_bins//3]) < tol
                and np.average(err[1:]) < tol), \
            f"init power spectrum incorrect for {random=}, k**{exp}"

        if random:
            fx = fft.idft(cla.to_device(queue, fk)).real
            if isinstance(fx, cla.Array):
                fx = fx.get()

            grid_size = np.product(grid_shape)

            avg = mpi.allreduce(np.sum(fx)) / grid_size
            var = mpi.allreduce(np.sum(fx**2)) / grid_size - avg**2
            skew = mpi.allreduce(np.sum(fx**3)) / grid_size - 3 * avg * var - avg**3
            skew /= var**1.5
            assert skew < tol, \
                f"init power spectrum has large skewness for k**{exp}"

    if timing:
        ntime = 10
        from common import timer
        t = timer(lambda: modes.generate(queue, random=random), ntime=ntime)
        print(f"{random=} set_modes took {t:.3f} ms for {grid_shape=}")
Code example #7
File: test_reduction.py  Project: mfkiwl/pystella
def test_reduction_with_new_shape(ctx_factory,
                                  grid_shape,
                                  proc_shape,
                                  dtype,
                                  op,
                                  _grid_shape,
                                  timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    grid_shape = _grid_shape or grid_shape
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    from pystella import Field
    reducers = {}
    reducers["avg"] = [(Field("f"), op)]

    reducer = ps.Reduction(mpi, reducers)

    f = clr.rand(queue, rank_shape, dtype=dtype)
    result = reducer(queue, f=f)
    avg = result["avg"]

    avg_test = reducer.reduce_array(f, op)
    if op == "avg":
        avg_test /= np.product(grid_shape)

    rtol = 5e-14 if dtype == np.float64 else 1e-5
    assert np.allclose(avg, avg_test, rtol=rtol, atol=0), \
        f"{op} reduction innaccurate for {grid_shape=}, {proc_shape=}"

    # test call to reducer with new shape
    grid_shape = tuple(Ni // 2 for Ni in grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)
    f = clr.rand(queue, rank_shape, dtype=dtype)
    result = reducer(queue, f=f)
    avg = result["avg"]

    avg_test = reducer.reduce_array(f, op)
    if op == "avg":
        avg_test /= np.product(grid_shape)

    rtol = 5e-14 if dtype == np.float64 else 1e-5
    assert np.allclose(avg, avg_test, rtol=rtol, atol=0), \
        f"{op} reduction w/new shape innaccurate for {grid_shape=}, {proc_shape=}"
Code example #8
File: test_energy.py  Project: mfkiwl/pystella
def test_scalar_energy(ctx_factory,
                       grid_shape,
                       proc_shape,
                       h,
                       dtype,
                       timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    grid_size = np.product(grid_shape)

    nscalars = 2

    def potential(f):
        phi, chi = f[0], f[1]
        return 1 / 2 * phi**2 + 1 / 2 * chi**2 + 1 / 2 * phi**2 * chi**2

    scalar_sector = ps.ScalarSector(nscalars, potential=potential)
    scalar_energy = ps.Reduction(mpi,
                                 scalar_sector,
                                 rank_shape=rank_shape,
                                 grid_size=grid_size,
                                 halo_shape=h)

    pencil_shape = tuple(ni + 2 * h for ni in rank_shape)
    f = clr.rand(queue, (nscalars, ) + pencil_shape, dtype)
    dfdt = clr.rand(queue, (nscalars, ) + pencil_shape, dtype)
    lap = clr.rand(queue, (nscalars, ) + rank_shape, dtype)

    energy = scalar_energy(queue, f=f, dfdt=dfdt, lap_f=lap, a=np.array(1.))

    kin_test = []
    grad_test = []
    for fld in range(nscalars):
        df_h = dfdt[fld].get()
        rank_sum = np.sum(df_h[h:-h, h:-h, h:-h]**2)
        kin_test.append(1 / 2 * mpi.allreduce(rank_sum) / grid_size)

        f_h = f[fld].get()
        lap_h = lap[fld].get()

        rank_sum = np.sum(-f_h[h:-h, h:-h, h:-h] * lap_h)
        grad_test.append(1 / 2 * mpi.allreduce(rank_sum) / grid_size)

    energy_test = {}
    energy_test["kinetic"] = np.array(kin_test)
    energy_test["gradient"] = np.array(grad_test)

    phi = f[0].get()[h:-h, h:-h, h:-h]
    chi = f[1].get()[h:-h, h:-h, h:-h]
    pot_rank = np.sum(potential([phi, chi]))
    energy_test["potential"] = np.array(mpi.allreduce(pot_rank) / grid_size)

    max_rtol = 1e-14 if dtype == np.float64 else 1e-5
    avg_rtol = 1e-14 if dtype == np.float64 else 1e-5

    for key, value in energy.items():
        max_err, avg_err = get_errs(value, energy_test[key])
        assert max_err < max_rtol and avg_err < avg_rtol, \
            f"{key} inaccurate for {nscalars=}, {grid_shape=}, {proc_shape=}" \
            f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        t = timer(lambda: scalar_energy(
            queue, a=np.array(1.), f=f, dfdt=dfdt, lap_f=lap))
        if mpi.rank == 0:
            print(f"scalar energy took {t:.3f} "
                  f"ms for {nscalars=}, {grid_shape=}, {proc_shape=}")
Code example #9
File: test_dft.py  Project: zachjweiner/pystella
def test_dft(ctx_factory,
             grid_shape,
             proc_shape,
             dtype,
             use_fftw,
             timing=False):
    if not use_fftw and np.product(proc_shape) > 1:
        pytest.skip("Must use mpi4py-fft on more than one rank.")

    ctx = ctx_factory()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    mpi0 = ps.DomainDecomposition(proc_shape, 0, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype, use_fftw=use_fftw)
    grid_size = np.product(grid_shape)
    rdtype = fft.rdtype

    if fft.is_real:
        np_dft = np.fft.rfftn
        np_idft = np.fft.irfftn
    else:
        np_dft = np.fft.fftn
        np_idft = np.fft.ifftn

    rtol = 1e-11 if dtype in ("float64", "complex128") else 2e-3

    rng = clr.ThreefryGenerator(ctx, seed=12321 * (mpi.rank + 1))
    fx = rng.uniform(queue, rank_shape, rdtype) + 1e-2
    if not fft.is_real:
        fx = fx + 1j * rng.uniform(queue, rank_shape, rdtype)

    fx = fx.get()

    fk = fft.dft(fx)
    if isinstance(fk, cla.Array):
        fk = fk.get()
    fk, _fk = fk.copy(), fk  # hang on to one that fftw won't overwrite

    fx2 = fft.idft(_fk)
    if isinstance(fx2, cla.Array):
        fx2 = fx2.get()

    fx_glb = np.empty(shape=grid_shape, dtype=dtype)
    for root in range(mpi.nranks):
        mpi0.gather_array(queue, fx, fx_glb, root=root)

    fk_glb_np = np.ascontiguousarray(np_dft(fx_glb))
    fx2_glb_np = np.ascontiguousarray(np_idft(fk_glb_np))

    if use_fftw:
        fk_np = fk_glb_np[fft.fft.local_slice(True)]
        fx2_np = fx2_glb_np[fft.fft.local_slice(False)]
    else:
        fk_np = fk_glb_np
        fx2_np = fx2_glb_np

    max_err, avg_err = get_errs(fx, fx2 / grid_size)
    assert max_err < rtol, \
        f"IDFT(DFT(f)) != f for {grid_shape=}, {proc_shape=}: {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(fk_np, fk)
    assert max_err < rtol, \
        f"DFT disagrees with numpy for {grid_shape=}, {proc_shape=}:"\
        f" {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(fx2_np, fx2 / grid_size)
    assert max_err < rtol, \
        f"IDFT disagrees with numpy for {grid_shape=}, {proc_shape=}:"\
        f" {max_err=}, {avg_err=}"

    fx_cl = cla.empty(queue, rank_shape, dtype)
    pencil_shape = tuple(ni + 2 * h for ni in rank_shape)
    fx_cl_halo = cla.empty(queue, pencil_shape, dtype)
    fx_np = np.empty(rank_shape, dtype)
    fx_np_halo = np.empty(pencil_shape, dtype)
    fk_cl = cla.empty(queue, fft.shape(True), fft.fk.dtype)
    fk_np = np.empty(fft.shape(True), fft.fk.dtype)

    # FIXME: check that these actually produce the correct result
    fx_types = {
        "cl": fx_cl,
        "cl halo": fx_cl_halo,
        "np": fx_np,
        "np halo": fx_np_halo,
        "None": None
    }

    fk_types = {"cl": fk_cl, "np": fk_np, "None": None}

    # run all of these to ensure no runtime errors even if no timing
    ntime = 20 if timing else 1

    from common import timer

    if mpi.rank == 0:
        print(f"N = {grid_shape}, ",
              "complex" if np.dtype(dtype).kind == "c" else "real")

    from itertools import product
    for (a, input_), (b, output) in product(fx_types.items(),
                                            fk_types.items()):
        t = timer(lambda: fft.dft(input_, output), ntime=ntime)
        if mpi.rank == 0:
            print(f"dft({a}, {b}) took {t:.3f} ms")

    for (a, input_), (b, output) in product(fk_types.items(),
                                            fx_types.items()):
        t = timer(lambda: fft.idft(input_, output), ntime=ntime)
        if mpi.rank == 0:
            print(f"idft({a}, {b}) took {t:.3f} ms")
Code example #10
def test_pol_spectra(ctx_factory, grid_shape, proc_shape, dtype, timing=False):
    ctx = ctx_factory()

    if np.dtype(dtype).kind != "f":
        dtype = "float64"

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)

    L = (10, 8, 7)
    dk = tuple(2 * np.pi / Li for Li in L)
    dx = tuple(Li / Ni for Li, Ni in zip(L, grid_shape))
    cdtype = fft.cdtype
    spec = ps.PowerSpectra(mpi, fft, dk, np.product(L))

    k_power = 2.

    fk = make_data(*fft.shape(True)).astype(cdtype)
    fk = make_hermitian(fk, fft).astype(cdtype)
    plus = cla.to_device(queue, fk)

    fk = make_data(*fft.shape(True)).astype(cdtype)
    fk = make_hermitian(fk, fft).astype(cdtype)
    minus = cla.to_device(queue, fk)

    plus_ps_1 = spec.bin_power(plus, queue=queue, k_power=k_power)
    minus_ps_1 = spec.bin_power(minus, queue=queue, k_power=k_power)

    project = ps.Projector(fft, h, dk, dx)

    vector = cla.empty(queue, (3, ) + fft.shape(True), cdtype)
    project.pol_to_vec(queue, plus, minus, vector)
    project.vec_to_pol(queue, plus, minus, vector)

    plus_ps_2 = spec.bin_power(plus, k_power=k_power)
    minus_ps_2 = spec.bin_power(minus, k_power=k_power)

    max_rtol = 1e-8 if dtype == np.float64 else 1e-2
    avg_rtol = 1e-11 if dtype == np.float64 else 1e-4

    max_err, avg_err = get_errs(plus_ps_1[1:-2], plus_ps_2[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"plus power spectrum inaccurate for {grid_shape=}: {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(minus_ps_1[1:-2], minus_ps_2[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"minus power spectrum inaccurate for {grid_shape=}: {max_err=}, {avg_err=}"

    vec_sum = sum(
        spec.bin_power(vector[mu], k_power=k_power) for mu in range(3))
    pol_sum = plus_ps_1 + minus_ps_1

    max_err, avg_err = get_errs(vec_sum[1:-2], pol_sum[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"polarization power spectrum inaccurate for {grid_shape=}" \
        f": {max_err=}, {avg_err=}"

    # reset
    for mu in range(3):
        fk = make_data(*fft.shape(True)).astype(cdtype)
        fk = make_hermitian(fk, fft).astype(cdtype)
        vector[mu].set(fk)

    long = cla.zeros_like(plus)
    project.decompose_vector(queue,
                             vector,
                             plus,
                             minus,
                             long,
                             times_abs_k=True)
    plus_ps = spec.bin_power(plus, k_power=k_power)
    minus_ps = spec.bin_power(minus, k_power=k_power)
    long_ps = spec.bin_power(long, k_power=k_power)

    vec_sum = sum(
        spec.bin_power(vector[mu], k_power=k_power) for mu in range(3))
    dec_sum = plus_ps + minus_ps + long_ps

    max_err, avg_err = get_errs(vec_sum[1:-2], dec_sum[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"decomp power spectrum inaccurate for {grid_shape=}: {max_err=}, {avg_err=}"

    hij = clr.rand(queue, (6, ) + rank_shape, dtype)
    gw_spec = spec.gw(hij, project, 1.3)
    gw_pol_spec = spec.gw_polarization(hij, project, 1.3)

    max_rtol = 1e-14 if dtype == np.float64 else 1e-2
    avg_rtol = 1e-11 if dtype == np.float64 else 1e-4

    pol_sum = gw_pol_spec[0] + gw_pol_spec[1]
    max_err, avg_err = get_errs(gw_spec[1:-2], pol_sum[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"gw pol don't add up to gw for {grid_shape=}: {max_err=}, {avg_err=}"
Code example #11
def test_spectra(ctx_factory, grid_shape, proc_shape, dtype, L, timing=False):
    ctx = ctx_factory()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)

    L = L or (3, 5, 7)
    dk = tuple(2 * np.pi / Li for Li in L)
    cdtype = fft.cdtype
    spec = ps.PowerSpectra(mpi,
                           fft,
                           dk,
                           np.product(L),
                           bin_width=min(dk) + .001)
    # FIXME: bin_width=min(dk) sometimes disagrees to O(.1%) with numpy...

    assert int(np.sum(spec.bin_counts)) == np.product(grid_shape), \
        "bin counts don't sum to total number of points/modes"

    k_power = 2.
    fk = make_data(*fft.shape(True)).astype(cdtype)

    fk_d = cla.to_device(queue, fk)
    spectrum = spec.bin_power(fk_d, k_power=k_power)
    bins = np.arange(-.5, spec.num_bins + .5) * spec.bin_width

    sub_k = list(x.get() for x in fft.sub_k.values())
    kvecs = np.meshgrid(*sub_k, indexing="ij", sparse=False)
    kmags = np.sqrt(sum((dki * ki)**2 for dki, ki in zip(dk, kvecs)))

    if fft.is_real:
        counts = 2. * np.ones_like(kmags)
        counts[kvecs[2] == 0] = 1
        counts[kvecs[2] == grid_shape[-1] // 2] = 1
    else:
        counts = 1. * np.ones_like(kmags)

    if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")):
        max_rtol = 1e-8
        avg_rtol = 1e-11
    else:
        max_rtol = 2e-2
        avg_rtol = 2e-4

    bin_counts2 = spec.bin_power(np.ones_like(fk), queue=queue, k_power=0)

    max_err, avg_err = get_errs(bin_counts2, np.ones_like(bin_counts2))
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"bin counting disagrees between PowerSpectra and np.histogram" \
        f" for {grid_shape=}: {max_err=}, {avg_err=}"

    hist = np.histogram(kmags,
                        bins=bins,
                        weights=np.abs(fk)**2 * counts * kmags**k_power)[0]
    hist = mpi.allreduce(hist) / spec.bin_counts

    # skip the Nyquist mode and the zero mode
    max_err, avg_err = get_errs(spectrum[1:-2], hist[1:-2])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"power spectrum inaccurate for {grid_shape=}: {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        t = timer(lambda: spec.bin_power(fk_d, k_power=k_power))
        print(f"power spectrum took {t:.3f} ms for {grid_shape=}, {dtype=}")
Code example #12
def test_transfer(ctx_factory, grid_shape, proc_shape, h, dtype, timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))
    mpi = ps.DomainDecomposition(proc_shape, h, rank_shape)
    grid_shape_2 = tuple(Ni // 2 for Ni in grid_shape)
    rank_shape_2 = tuple(ni // 2 for ni in rank_shape)
    mpi2 = ps.DomainDecomposition(proc_shape, h, rank_shape_2)

    from pystella.multigrid import (
        Injection,
        FullWeighting,
        LinearInterpolation,
        # CubicInterpolation
    )

    inject = Injection(halo_shape=h, dtype=dtype)
    full_weighting = FullWeighting(halo_shape=h, dtype=dtype)

    def relerr(a, b):
        return np.max(np.abs(a - b))

    for restrict in [inject, full_weighting]:
        f1h = cla.zeros(queue, tuple(ni + 2 * h for ni in rank_shape), dtype)
        f2h = cla.zeros(queue, tuple(ni + 2 * h for ni in rank_shape_2), dtype)

        kvec = 2 * np.pi * np.array([1, 1, 1]).astype(dtype)

        xvecs = np.meshgrid(np.linspace(0, 1, grid_shape[0] + 1)[:-1],
                            np.linspace(0, 1, grid_shape[1] + 1)[:-1],
                            np.linspace(0, 1, grid_shape[2] + 1)[:-1],
                            indexing="ij")

        phases = kvec[0] * xvecs[0] + kvec[1] * xvecs[1] + kvec[2] * xvecs[2]
        mpi.scatter_array(queue, np.sin(phases), f1h, root=0)
        mpi.share_halos(queue, f1h)

        restrict(queue, f1=f1h, f2=f2h)

        restrict_error = relerr(f1h.get()[h:-h:2, h:-h:2, h:-h:2],
                                f2h.get()[h:-h, h:-h, h:-h])

        if restrict == inject:
            expected_error_bound = 1e-15
        else:
            expected_error_bound = .05 / (grid_shape[0] / 32)**2

        assert restrict_error < expected_error_bound, \
            f"restrict innaccurate for {grid_shape=}, {h=}, {proc_shape=}"

    linear_interp = LinearInterpolation(halo_shape=h, dtype=dtype)
    # cubic_interp = CubicInterpolation(halo_shape=h, dtype=dtype)

    for interp in [linear_interp]:
        kvec = 2 * np.pi * np.array([1, 1, 1]).astype(dtype)

        xvecs = np.meshgrid(np.linspace(0, 1, grid_shape_2[0] + 1)[:-1],
                            np.linspace(0, 1, grid_shape_2[1] + 1)[:-1],
                            np.linspace(0, 1, grid_shape_2[2] + 1)[:-1],
                            indexing="ij")

        phases = kvec[0] * xvecs[0] + kvec[1] * xvecs[1] + kvec[2] * xvecs[2]
        mpi2.scatter_array(queue, np.sin(phases), f2h, root=0)
        mpi2.share_halos(queue, f2h)

        f1h_new = cla.zeros_like(f1h)
        interp(queue, f1=f1h_new, f2=f2h)
        mpi.share_halos(queue, f1h_new)

        interp_error = relerr(f1h_new.get(), f1h.get())

        # if interp == cubic_interp:
        #     expected_error_bound = .005 / (grid_shape[0]/32)**4
        # else:
        #     expected_error_bound = .1 / (grid_shape[0]/32)**2
        expected_error_bound = .1 / (grid_shape[0] / 32)**2

        assert interp_error < expected_error_bound, \
            f"interp innaccurate for {grid_shape=}, {h=}, {proc_shape=}"
Code example #13
File: test_multigrid.py  Project: mfkiwl/pystella
def test_multigrid(ctx_factory, grid_shape, proc_shape, h, dtype, Solver, MG,
                   timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))
    mpi = ps.DomainDecomposition(proc_shape, h, rank_shape)

    L = 10
    dx = L / grid_shape[0]

    statistics = ps.FieldStatistics(mpi, h, rank_shape=rank_shape,
                                    grid_size=np.product(grid_shape))

    def get_laplacian(f):
        from pystella.derivs import _lap_coefs, centered_diff
        lap_coefs = _lap_coefs[h]
        from pymbolic import var
        return sum([centered_diff(f, lap_coefs, direction=mu, order=2)
                    for mu in range(1, 4)]) / var("dx")**2

    test_problems = {}

    from pystella import Field
    f = Field("f", offset="h")
    rho = Field("rho", offset="h")
    test_problems[f] = (get_laplacian(f), rho)

    f = Field("f2", offset="h")
    rho = Field("rho2", offset="h")
    test_problems[f] = (get_laplacian(f) - f, rho)

    solver = Solver(mpi, queue, test_problems, halo_shape=h, dtype=dtype,
                    fixed_parameters=dict(omega=1/2))
    mg = MG(solver=solver, halo_shape=h, dtype=dtype)

    def zero_mean_array():
        f0 = clr.rand(queue, grid_shape, dtype)
        f = clr.rand(queue, tuple(ni + 2*h for ni in rank_shape), dtype)
        mpi.scatter_array(queue, f0, f, root=0)
        avg = statistics(f)["mean"]
        f = f - avg
        mpi.share_halos(queue, f)
        return f

    f = zero_mean_array()
    rho = zero_mean_array()

    f2 = zero_mean_array()
    rho2 = zero_mean_array()

    poisson_errs = []
    helmholtz_errs = []
    num_v_cycles = 15 if MG == MultiGridSolver else 10
    for i in range(num_v_cycles):
        errs = mg(mpi, queue, dx0=dx, f=f, rho=rho, f2=f2, rho2=rho2)
        poisson_errs.append(errs[-1][-1]["f"])
        helmholtz_errs.append(errs[-1][-1]["f2"])

    for name, cycle_errs in zip(["poisson", "helmholtz"],
                                [poisson_errs, helmholtz_errs]):
        tol = 1e-6 if MG == MultiGridSolver else 1e-15
        assert cycle_errs[-1][1] < tol and cycle_errs[-2][1] < 10*tol, \
            "multigrid solution to {name} eqn is inaccurate for " \
            f"{grid_shape=}, {h=}, {proc_shape=}"
Code example #14
def test_relax(ctx_factory,
               grid_shape,
               proc_shape,
               h,
               dtype,
               Solver,
               timing=False):
    if min(grid_shape) < 128:
        pytest.skip("test_relax needs larger grids, for now")

    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))
    mpi = ps.DomainDecomposition(proc_shape, h, rank_shape)

    L = 10
    dx = L / grid_shape[0]
    dk = 2 * np.pi / L

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)
    spectra = ps.PowerSpectra(mpi, fft, (dk, ) * 3, L**3)
    statistics = ps.FieldStatistics(mpi,
                                    h,
                                    rank_shape=rank_shape,
                                    grid_size=np.product(grid_shape))

    def get_laplacian(f):
        from pystella.derivs import _lap_coefs, centered_diff
        lap_coefs = _lap_coefs[h]
        from pymbolic import var
        return sum([
            centered_diff(f, lap_coefs, direction=mu, order=2)
            for mu in range(1, 4)
        ]) / var("dx")**2

    test_problems = {}

    from pystella import Field
    f = Field("f", offset="h")
    rho = Field("rho", offset="h")
    test_problems[f] = (get_laplacian(f), rho)

    f = Field("f2", offset="h")
    rho = Field("rho2", offset="h")
    test_problems[f] = (get_laplacian(f) - f, rho)

    solver = Solver(mpi,
                    queue,
                    test_problems,
                    halo_shape=h,
                    dtype=dtype,
                    fixed_parameters=dict(omega=1 / 2))

    def zero_mean_array():
        f0 = clr.rand(queue, grid_shape, dtype)
        f = clr.rand(queue, tuple(ni + 2 * h for ni in rank_shape), dtype)
        mpi.scatter_array(queue, f0, f, root=0)
        avg = statistics(f)["mean"]
        f = f - avg
        mpi.share_halos(queue, f)
        return f

    f = zero_mean_array()
    rho = zero_mean_array()
    tmp = cla.zeros_like(f)

    f2 = zero_mean_array()
    rho2 = zero_mean_array()
    tmp2 = cla.zeros_like(f)

    num_iterations = 1000
    errors = {"f": [], "f2": []}
    first_mode_zeroed = {"f": [], "f2": []}
    for i in range(0, num_iterations, 2):
        solver(mpi,
               queue,
               iterations=2,
               dx=np.array(dx),
               f=f,
               tmp_f=tmp,
               rho=rho,
               f2=f2,
               tmp_f2=tmp2,
               rho2=rho2)

        err = solver.get_error(queue,
                               f=f,
                               r_f=tmp,
                               rho=rho,
                               f2=f2,
                               r_f2=tmp2,
                               rho2=rho2,
                               dx=np.array(dx))
        for k, v in err.items():
            errors[k].append(v)

        for key, resid in zip(["f", "f2"], [tmp, tmp2]):
            spectrum = spectra(resid, k_power=0)
            if mpi.rank == 0:
                max_amp = np.max(spectrum)
                first_zero = np.argmax(spectrum[1:] < 1e-30 * max_amp)
                first_mode_zeroed[key].append(first_zero)

    for k, errs in errors.items():
        errs = np.array(errs)
        iters = np.arange(1, errs.shape[0] + 1)
        assert (errs[10:, 0] * iters[10:] / errs[0, 0] < 1.).all(), \
            "relaxation not converging at least linearly for " \
            f"{grid_shape=}, {h=}, {proc_shape=}"

    first_mode_zeroed = mpi.bcast(first_mode_zeroed, root=0)
    for k, x in first_mode_zeroed.items():
        x = np.array(list(x))[2:]
        assert (x[1:] <= x[:-1]).all() and np.min(x) < np.max(x) / 5, \
            f"relaxation not smoothing error {grid_shape=}, {h=}, {proc_shape=}"
Code example #15
File: test_projectors.py  Project: mfkiwl/pystella
def test_tensor_projector(ctx_factory,
                          grid_shape,
                          proc_shape,
                          h,
                          dtype,
                          timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)

    L = (10, 8, 11.5)
    dx = tuple(Li / Ni for Li, Ni in zip(L, grid_shape))
    dk = tuple(2 * np.pi / Li for Li in L)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)
    cdtype = fft.cdtype
    if h > 0:
        stencil = FirstCenteredDifference(h)
        project = ps.Projector(fft, stencil.get_eigenvalues, dk, dx)
        derivs = ps.FiniteDifferencer(mpi, h, dx)
    else:
        project = ps.Projector(fft, lambda k, dx: k, dk, dx)
        derivs = ps.SpectralCollocator(fft, dk)

    vector_x = cla.empty(queue, (3, ) + tuple(ni + 2 * h for ni in rank_shape),
                         dtype)
    div = cla.empty(queue, rank_shape, dtype)
    pdx = cla.empty(queue, (3, ) + rank_shape, dtype)

    def get_divergence_errors(hij):
        max_errors = []
        avg_errors = []
        for i in range(1, 4):
            for mu in range(3):
                fft.idft(hij[tensor_id(i, mu + 1)], vector_x[mu])

            derivs.divergence(queue, vector_x, div)

            derivs(queue, fx=vector_x[0], pdx=pdx[0])
            derivs(queue, fx=vector_x[1], pdy=pdx[1])
            derivs(queue, fx=vector_x[2], pdz=pdx[2])
            norm = sum([clm.fabs(pdx[mu]) for mu in range(3)])

            max_errors.append(cla.max(clm.fabs(div)) / cla.max(norm))
            avg_errors.append(cla.sum(clm.fabs(div)) / cla.sum(norm))

        return np.array(max_errors), np.array(avg_errors)

    max_rtol = 1e-11 if dtype == np.float64 else 1e-4
    avg_rtol = 1e-13 if dtype == np.float64 else 1e-5

    def get_trace_errors(hij_h):
        trace = sum([hij_h[tensor_id(i, i)] for i in range(1, 4)])
        norm = np.sqrt(
            sum(np.abs(hij_h[tensor_id(i, i)])**2 for i in range(1, 4)))

        trace = np.abs(trace[norm != 0]) / norm[norm != 0]
        trace = trace[trace < .9]
        return np.max(trace), np.sum(trace) / trace.size

    k_shape = fft.shape(True)
    hij = cla.empty(queue, shape=(6, ) + k_shape, dtype=cdtype)

    for mu in range(6):
        hij[mu] = make_data(queue, fft).astype(cdtype)

    project.transverse_traceless(queue, hij)
    hij_h = hij.get()

    if isinstance(fft, gDFT):
        assert all(is_hermitian(hij_h[i]) for i in range(6)), \
            f"TT projection is non-hermitian for {grid_shape=}, {h=}"

    max_err, avg_err = get_divergence_errors(hij)
    assert all(max_err < max_rtol) and all(avg_err < avg_rtol), \
        f"TT projection not transverse for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    max_err, avg_err = get_trace_errors(hij_h)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"TT projected tensor isn't traceless for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    plus = make_data(queue, fft).astype(cdtype)
    minus = make_data(queue, fft).astype(cdtype)
    project.pol_to_tensor(queue, plus, minus, hij)

    if isinstance(fft, gDFT):
        assert all(is_hermitian(hij[i]) for i in range(6)), \
            f"pol->tensor is non-hermitian for {grid_shape=}, {h=}"

    max_err, avg_err = get_divergence_errors(hij)
    assert all(max_err < max_rtol) and all(avg_err < avg_rtol), \
        f"pol->tensor not transverse for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    hij_h = hij.get()
    max_err, avg_err = get_trace_errors(hij_h)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->tensor isn't traceless for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    hij_2 = cla.zeros_like(hij)
    project.transverse_traceless(queue, hij, hij_2)
    hij_h_2 = hij_2.get()

    max_err, avg_err = get_errs(hij_h, hij_h_2)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->tensor != its own TT projection for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    plus1 = cla.zeros_like(plus)
    minus1 = cla.zeros_like(minus)
    project.tensor_to_pol(queue, plus1, minus1, hij)

    if isinstance(fft, gDFT):
        assert is_hermitian(plus1) and is_hermitian(minus1), \
            f"polarizations aren't hermitian for {grid_shape=}, {h=}"

    max_err, avg_err = get_errs(plus1.get(), plus.get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->tensor->pol (plus) is not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(minus1.get(), minus.get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->tensor->pol (minus) is not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    project.tensor_to_pol(queue, hij[0], hij[1], hij)

    max_err, avg_err = get_errs(plus1.get(), hij[0].get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"in-place pol->tensor->pol (plus) not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(minus1.get(), hij[1].get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"in-place pol->tensor->pol (minus) not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        ntime = 10
        t = timer(lambda: project.transverse_traceless(queue, hij),
                  ntime=ntime)
        print(f"TT projection took {t:.3f} ms for {grid_shape=}")
        t = timer(lambda: project.pol_to_tensor(queue, plus, minus, hij),
                  ntime=ntime)
        print(f"pol->tensor took {t:.3f} ms for {grid_shape=}")
        t = timer(lambda: project.tensor_to_pol(queue, plus, minus, hij),
                  ntime=ntime)
        print(f"tensor->pol took {t:.3f} ms for {grid_shape=}")
Code example #16
def test_gather_scatter(ctx_factory,
                        grid_shape,
                        proc_shape,
                        h,
                        dtype,
                        _grid_shape,
                        pass_grid_shape,
                        timing=False):
    ctx = ctx_factory()

    if isinstance(h, int):
        h = (h, ) * 3

    queue = cl.CommandQueue(ctx)
    grid_shape = _grid_shape or grid_shape
    mpi = ps.DomainDecomposition(proc_shape, h)
    rank_shape, substart = mpi.get_rank_shape_start(grid_shape)

    rank_slice = tuple(
        slice(si, si + ri) for ri, si, hi in zip(rank_shape, substart, h))
    pencil_shape = tuple(ni + 2 * hi for ni, hi in zip(rank_shape, h))

    unpadded_slc = tuple(slice(hi, -hi) if hi > 0 else slice(None) for hi in h)

    # create random data with same seed on all ranks
    rng = clr.ThreefryGenerator(ctx, seed=12321)
    data = rng.uniform(queue, grid_shape, dtype)

    # cl.Array -> cl.Array
    subdata = cla.zeros(queue, pencil_shape, dtype)
    mpi.scatter_array(queue, data if mpi.rank == 0 else None, subdata, 0)
    sub_h = subdata.get()
    data_h = data.get()
    assert (sub_h[unpadded_slc] == data_h[rank_slice]).all()

    data_test = cla.zeros_like(data)
    mpi.gather_array(queue, subdata, data_test if mpi.rank == 0 else None, 0)
    data_test_h = data_test.get()
    if mpi.rank == 0:
        assert (data_test_h == data_h).all()

    # np.ndarray -> np.ndarray
    mpi.scatter_array(queue, data_h if mpi.rank == 0 else None, sub_h, 0)
    assert (sub_h[unpadded_slc] == data_h[rank_slice]).all()

    mpi.gather_array(queue, sub_h, data_test_h if mpi.rank == 0 else None, 0)
    if mpi.rank == 0:
        assert (data_test_h == data_h).all()

    # scatter cl.Array -> np.ndarray
    sub_h[:] = 0
    mpi.scatter_array(queue, data if mpi.rank == 0 else None, sub_h, 0)
    assert (sub_h[unpadded_slc] == data_h[rank_slice]).all()

    # gather np.ndarray -> cl.Array
    data_test[:] = 0
    mpi.gather_array(queue, sub_h, data_test if mpi.rank == 0 else None, 0)
    data_test_h = data_test.get()
    if mpi.rank == 0:
        assert (data_test_h == data_h).all()

    # scatter np.ndarray -> cl.Array
    subdata[:] = 0
    mpi.scatter_array(queue, data_h if mpi.rank == 0 else None, subdata, 0)
    sub_h = subdata.get()
    assert (sub_h[unpadded_slc] == data_h[rank_slice]).all()

    # gather cl.Array -> np.ndarray
    data_test_h[:] = 0
    mpi.gather_array(queue, subdata, data_test_h if mpi.rank == 0 else None, 0)
    if mpi.rank == 0:
        assert (data_test_h == data_h).all()

    if timing:
        from common import timer
        ntime = 25
        times = {}

        times["scatter cl.Array -> cl.Array"] = \
            timer(lambda: mpi.scatter_array(queue, data, subdata, 0), ntime=ntime)
        times["scatter cl.Array -> np.ndarray"] = \
            timer(lambda: mpi.scatter_array(queue, data, sub_h, 0), ntime=ntime)
        times["scatter np.ndarray -> cl.Array"] = \
            timer(lambda: mpi.scatter_array(queue, data_h, subdata, 0), ntime=ntime)
        times["scatter np.ndarray -> np.ndarray"] = \
            timer(lambda: mpi.scatter_array(queue, data_h, sub_h, 0), ntime=ntime)

        times["gather cl.Array -> cl.Array"] = \
            timer(lambda: mpi.gather_array(queue, subdata, data, 0), ntime=ntime)
        times["gather cl.Array -> np.ndarray"] = \
            timer(lambda: mpi.gather_array(queue, subdata, data_h, 0), ntime=ntime)
        times["gather np.ndarray -> cl.Array"] = \
            timer(lambda: mpi.gather_array(queue, sub_h, data, 0), ntime=ntime)
        times["gather np.ndarray -> np.ndarray"] = \
            timer(lambda: mpi.gather_array(queue, sub_h, data_h, 0), ntime=ntime)

        if mpi.rank == 0:
            print(f"{grid_shape=}, {h=}, {proc_shape=}")
            for key, val in times.items():
                print(f"{key} took {val:.3f} ms")
Code example #17
File: test_poisson.py  Project: mfkiwl/pystella
def test_spectral_poisson(ctx_factory, grid_shape, proc_shape, h, dtype,
                          timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)
    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)

    L = (3, 5, 7)
    dx = tuple(Li / Ni for Li, Ni in zip(L, grid_shape))
    dk = tuple(2 * np.pi / Li for Li in L)

    if h == 0:
        def get_evals_2(k, dx):
            return - k**2

        derivs = ps.SpectralCollocator(fft, dk)
    else:
        from pystella.derivs import SecondCenteredDifference
        get_evals_2 = SecondCenteredDifference(h).get_eigenvalues
        derivs = ps.FiniteDifferencer(mpi, h, dx, stream=False)

    solver = ps.SpectralPoissonSolver(fft, dk, dx, get_evals_2)

    pencil_shape = tuple(ni + 2*h for ni in rank_shape)

    statistics = ps.FieldStatistics(mpi, 0, rank_shape=rank_shape,
                                    grid_size=np.product(grid_shape))

    fx = cla.empty(queue, pencil_shape, dtype)
    rho = clr.rand(queue, rank_shape, dtype)
    rho -= statistics(rho)["mean"]
    lap = cla.empty(queue, rank_shape, dtype)
    rho_h = rho.get()

    for m_squared in (0, 1.2, 19.2):
        solver(queue, fx, rho, m_squared=m_squared)
        fx_h = fx.get()
        if h > 0:
            fx_h = fx_h[h:-h, h:-h, h:-h]

        derivs(queue, fx=fx, lap=lap)

        diff = np.fabs(lap.get() - rho_h - m_squared * fx_h)
        max_err = np.max(diff) / cla.max(clm.fabs(rho))
        avg_err = np.sum(diff) / cla.sum(clm.fabs(rho))

        max_rtol = 1e-12 if dtype == np.float64 else 1e-4
        avg_rtol = 1e-13 if dtype == np.float64 else 1e-5

        assert max_err < max_rtol and avg_err < avg_rtol, \
            f"solution inaccurate for {h=}, {grid_shape=}, {proc_shape=}"

    if timing:
        from common import timer
        time = timer(lambda: solver(queue, fx, rho, m_squared=m_squared), ntime=10)

        if mpi.rank == 0:
            print(f"poisson took {time:.3f} ms for {grid_shape=}, {proc_shape=}")
Code example #18
File: test_histogram.py  Project: mfkiwl/pystella
def test_field_histogram(ctx_factory, grid_shape, proc_shape, dtype, timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    h = 1
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)
    pencil_shape = tuple(Ni + 2 * h for Ni in rank_shape)

    num_bins = 432

    if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")):
        max_rtol, avg_rtol = 1e-10, 1e-11
    else:
        max_rtol, avg_rtol = 5e-4, 5e-5

    hist = ps.FieldHistogrammer(mpi, num_bins, dtype,
                                rank_shape=rank_shape, halo_shape=h)

    rng = clr.ThreefryGenerator(ctx, seed=12321)
    fx = rng.uniform(queue, (2, 2)+pencil_shape, dtype, a=-1.2, b=3.)
    fx_h = fx.get()[..., h:-h, h:-h, h:-h]

    result = hist(fx)

    outer_shape = fx.shape[:-3]
    from itertools import product
    slices = list(product(*[range(n) for n in outer_shape]))

    for slc in slices:
        res = result["linear"][slc]
        np_res = np.histogram(fx_h[slc], bins=result["linear_bins"][slc])[0]
        np_res = mpi.allreduce(np_res)

        max_err, avg_err = get_errs(res, np_res)
        assert max_err < max_rtol and avg_err < avg_rtol, \
            f"linear Histogrammer inaccurate for grid_shape={grid_shape}" \
            f": {max_err=}, {avg_err=}"

        res = result["log"][slc]
        bins = result["log_bins"][slc]

        # avoid FPA comparison issues
        # numpy sometimes doesn't count the actual maximum/minimum
        eps = 1e-14 if np.dtype(dtype) == np.dtype("float64") else 1e-4
        bins[0] *= (1 - eps)
        bins[-1] *= (1 + eps)

        np_res = np.histogram(np.abs(fx_h[slc]), bins=bins)[0]
        np_res = mpi.allreduce(np_res)
        norm = np.maximum(np.abs(res), np.abs(np_res))
        norm[norm == 0.] = 1.

        max_err, avg_err = get_errs(res, np_res)
        assert max_err < max_rtol and avg_err < avg_rtol, \
            f"log Histogrammer inaccurate for grid_shape={grid_shape}" \
            f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        t = timer(lambda: hist(fx[0, 0]))
        print(f"field histogram took {t:.3f} ms for {grid_shape=}, {dtype=}")
Code example #19
def test_share_halos(ctx_factory,
                     grid_shape,
                     proc_shape,
                     h,
                     dtype,
                     _grid_shape,
                     pass_grid_shape,
                     timing=False):
    ctx = ctx_factory()

    if isinstance(h, int):
        h = (h, ) * 3

    queue = cl.CommandQueue(ctx)
    grid_shape = _grid_shape or grid_shape
    mpi = ps.DomainDecomposition(
        proc_shape, h, grid_shape=(grid_shape if pass_grid_shape else None))
    rank_shape, substart = mpi.get_rank_shape_start(grid_shape)

    # data will be same on each rank
    rng = clr.ThreefryGenerator(ctx, seed=12321)
    data = rng.uniform(queue,
                       tuple(Ni + 2 * hi for Ni, hi in zip(grid_shape, h)),
                       dtype).get()
    if h[0] > 0:
        data[:h[0], :, :] = data[-2 * h[0]:-h[0], :, :]
        data[-h[0]:, :, :] = data[h[0]:2 * h[0], :, :]
    if h[1] > 0:
        data[:, :h[1], :] = data[:, -2 * h[1]:-h[1], :]
        data[:, -h[1]:, :] = data[:, h[1]:2 * h[1], :]
    if h[2] > 0:
        data[:, :, :h[2]] = data[:, :, -2 * h[2]:-h[2]]
        data[:, :, -h[2]:] = data[:, :, h[2]:2 * h[2]]

    subdata = np.empty(tuple(ni + 2 * hi for ni, hi in zip(rank_shape, h)),
                       dtype)
    rank_slice = tuple(
        slice(si + hi, si + ni + hi)
        for ni, si, hi in zip(rank_shape, substart, h))
    unpadded_slc = tuple(slice(hi, -hi) if hi > 0 else slice(None) for hi in h)
    subdata[unpadded_slc] = data[rank_slice]

    subdata_device = cla.to_device(queue, subdata)
    mpi.share_halos(queue, subdata_device)
    subdata2 = subdata_device.get()

    pencil_slice = tuple(
        slice(si, si + ri + 2 * hi)
        for ri, si, hi in zip(rank_shape, substart, h))
    assert (subdata2 == data[pencil_slice]).all(), \
        f"rank {mpi.rank} {mpi.rank_tuple} has incorrect halo data"

    # test that can call with different-shaped input
    if not pass_grid_shape:
        subdata_device_new = clr.rand(
            queue, tuple(ni // 2 + 2 * hi for ni, hi in zip(rank_shape, h)),
            dtype)
        mpi.share_halos(queue, subdata_device_new)

    if timing:
        from common import timer
        t = timer(lambda: mpi.share_halos(queue, fx=subdata_device))
        if mpi.rank == 0:
            print(f"share_halos took {t:.3f} ms for "
                  f"{grid_shape=}, {h=}, {proc_shape=}")
Code example #20
def test_gradient_laplacian(ctx_factory,
                            grid_shape,
                            proc_shape,
                            h,
                            dtype,
                            stream,
                            timing=False):
    if h == 0 and stream is True:
        pytest.skip("no streaming spectral")

    ctx = ctx_factory()

    queue = cl.CommandQueue(ctx)
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, start = mpi.get_rank_shape_start(grid_shape)

    L = (3, 5, 7)
    dx = tuple(Li / Ni for Li, Ni in zip(L, grid_shape))
    dk = tuple(2 * np.pi / Li for Li in L)

    if h == 0:

        def get_evals_1(k, dx):
            return k

        def get_evals_2(k, dx):
            return -k**2

        fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)
        derivs = ps.SpectralCollocator(fft, dk)
    else:
        from pystella.derivs import FirstCenteredDifference, SecondCenteredDifference
        get_evals_1 = FirstCenteredDifference(h).get_eigenvalues
        get_evals_2 = SecondCenteredDifference(h).get_eigenvalues
        if stream:
            try:
                derivs = ps.FiniteDifferencer(mpi,
                                              h,
                                              dx,
                                              rank_shape=rank_shape,
                                              stream=stream)
            except:  # noqa
                pytest.skip("StreamingStencil unavailable")
        else:
            derivs = ps.FiniteDifferencer(mpi, h, dx, rank_shape=rank_shape)

    pencil_shape = tuple(ni + 2 * h for ni in rank_shape)

    # set up test data
    fx_h = np.empty(pencil_shape, dtype)
    kvec = np.array(dk) * np.array([-5, 4, -3]).astype(dtype)
    xvec = np.meshgrid(*[
        dxi * np.arange(si, si + ni)
        for dxi, si, ni in zip(dx, start, rank_shape)
    ],
                       indexing="ij")

    phases = sum(ki * xi for ki, xi in zip(kvec, xvec))
    if h > 0:
        fx_h[h:-h, h:-h, h:-h] = np.sin(phases)
    else:
        fx_h[:] = np.sin(phases)
    fx_cos = np.cos(phases)

    fx = cla.to_device(queue, fx_h)

    lap = cla.empty(queue, rank_shape, dtype)
    grd = cla.empty(queue, (3, ) + rank_shape, dtype)
    derivs(queue, fx=fx, lap=lap, grd=grd)

    eff_kmag_sq = sum(
        get_evals_2(kvec_i, dxi) for dxi, kvec_i in zip(dx, kvec))
    lap_true = eff_kmag_sq * np.sin(phases)

    max_rtol = 1e-9 if dtype == np.float64 else 3e-4
    avg_rtol = 1e-11 if dtype == np.float64 else 5e-5

    # filter small values dominated by round-off error
    mask = np.abs(lap_true) > 1e-11
    max_err, avg_err = get_errs(lap_true[mask], lap.get()[mask])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"lap inaccurate for {h=}, {grid_shape=}, {proc_shape=}:" \
        f" {max_err=}, {avg_err=}"

    for i in range(3):
        eff_k = get_evals_1(kvec[i], dx[i])
        pdi_true = eff_k * fx_cos

        # filter small values dominated by round-off error
        mask = np.abs(pdi_true) > 1e-11
        max_err, avg_err = get_errs(pdi_true[mask], grd[i].get()[mask])
        assert max_err < max_rtol and avg_err < avg_rtol, \
            f"pd{i} inaccurate for {h=}, {grid_shape=}, {proc_shape=}:" \
            f" {max_err=}, {avg_err=}"

    vec = cla.empty(queue, (3, ) + pencil_shape, dtype)
    for mu in range(3):
        vec[mu] = fx

    div = cla.empty(queue, rank_shape, dtype)
    derivs.divergence(queue, vec, div)
    div_true = sum(grd[i] for i in range(3)).get()

    # filter small values dominated by round-off error
    mask = np.abs(div_true) > 1e-11
    max_err, avg_err = get_errs(div_true[mask], div.get()[mask])
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"div inaccurate for {h=}, {grid_shape=}, {proc_shape=}:" \
        f" {max_err=}, {avg_err=}"

    if timing:
        from common import timer

        base_args = dict(queue=queue, fx=fx)
        div_args = dict(queue=queue, vec=vec, div=div)
        if h == 0:
            import pyopencl.tools as clt
            pool = clt.MemoryPool(clt.ImmediateAllocator(queue))
            base_args["allocator"] = pool
            div_args["allocator"] = pool

        times = {}
        times["gradient and laplacian"] = timer(
            lambda: derivs(lap=lap, grd=grd, **base_args))
        times["gradient"] = timer(lambda: derivs(grd=grd, **base_args))
        times["laplacian"] = timer(lambda: derivs(lap=lap, **base_args))
        times["pdx"] = timer(lambda: derivs(pdx=grd[0], **base_args))
        times["pdy"] = timer(lambda: derivs(pdy=grd[1], **base_args))
        times["pdz"] = timer(lambda: derivs(pdz=grd[2], **base_args))
        times["divergence"] = timer(lambda: derivs.divergence(**div_args))

        if mpi.rank == 0:
            print(f"{grid_shape=}, {h=}, {proc_shape=}")
            for key, val in times.items():
                print(f"{key} took {val:.3f} ms")
Code example #21
File: test_projectors.py  Project: mfkiwl/pystella
def test_vector_projector(ctx_factory,
                          grid_shape,
                          proc_shape,
                          h,
                          dtype,
                          timing=False):
    if ctx_factory:
        ctx = ctx_factory()
    else:
        ctx = ps.choose_device_and_make_context()

    queue = cl.CommandQueue(ctx)
    mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape)
    rank_shape, _ = mpi.get_rank_shape_start(grid_shape)
    pencil_shape = tuple(ni + 2 * h for ni in rank_shape)

    L = (10, 8, 11.5)
    dx = tuple(Li / Ni for Li, Ni in zip(L, grid_shape))
    dk = tuple(2 * np.pi / Li for Li in L)

    fft = ps.DFT(mpi, ctx, queue, grid_shape, dtype)
    cdtype = fft.cdtype
    if h > 0:
        stencil = FirstCenteredDifference(h)
        project = ps.Projector(fft, stencil.get_eigenvalues, dk, dx)
        derivs = ps.FiniteDifferencer(mpi, h, dx)
    else:
        project = ps.Projector(fft, lambda k, dx: k, dk, dx)
        derivs = ps.SpectralCollocator(fft, dk)

    vector_x = cla.empty(queue, (3, ) + pencil_shape, dtype)
    div = cla.empty(queue, rank_shape, dtype)
    pdx = cla.empty(queue, (3, ) + rank_shape, dtype)

    def get_divergence_error(vector):
        for mu in range(3):
            fft.idft(vector[mu], vector_x[mu])

        derivs.divergence(queue, vector_x, div)

        derivs(queue, fx=vector_x[0], pdx=pdx[0])
        derivs(queue, fx=vector_x[1], pdy=pdx[1])
        derivs(queue, fx=vector_x[2], pdz=pdx[2])
        norm = sum([clm.fabs(pdx[mu]) for mu in range(3)])

        max_err = cla.max(clm.fabs(div)) / cla.max(norm)
        avg_err = cla.sum(clm.fabs(div)) / cla.sum(norm)
        return max_err, avg_err

    max_rtol = 1e-11 if dtype == np.float64 else 1e-4
    avg_rtol = 1e-13 if dtype == np.float64 else 1e-5

    k_shape = fft.shape(True)
    vector = cla.empty(queue, (3, ) + k_shape, cdtype)

    for mu in range(3):
        vector[mu] = make_data(queue, fft).astype(cdtype)

    project.transversify(queue, vector)

    max_err, avg_err = get_divergence_error(vector)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"transversify failed for {grid_shape=}, {h=}: {max_err=}, {avg_err=}"

    plus = make_data(queue, fft).astype(cdtype)
    minus = make_data(queue, fft).astype(cdtype)
    project.pol_to_vec(queue, plus, minus, vector)

    if isinstance(fft, gDFT):
        assert all(is_hermitian(vector[i]) for i in range(3)), \
            f"pol->vec is non-hermitian for {grid_shape=}, {h=}"

    max_err, avg_err = get_divergence_error(vector)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol_to_vec result not transverse for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    vector_h = vector.get()
    vector_2 = cla.zeros_like(vector)
    project.transversify(queue, vector, vector_2)
    vector_2_h = vector_2.get()

    max_err, avg_err = get_errs(vector_h, vector_2_h)
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->vector != its own transverse proj. for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    plus1 = cla.zeros_like(plus)
    minus1 = cla.zeros_like(minus)
    project.vec_to_pol(queue, plus1, minus1, vector)

    if isinstance(fft, gDFT):
        assert is_hermitian(plus1) and is_hermitian(minus1), \
            f"polarizations aren't hermitian for {grid_shape=}, {h=}"

    max_err, avg_err = get_errs(plus1.get(), plus.get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->vec->pol (plus) is not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(minus1.get(), minus.get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"pol->vec->pol (minus) is not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    project.vec_to_pol(queue, vector[0], vector[1], vector)

    max_err, avg_err = get_errs(plus1.get(), vector[0].get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"in-place pol->vec->pol (plus) not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    max_err, avg_err = get_errs(minus1.get(), vector[1].get())
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"in-place pol->vec->pol (minus) not identity for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    # reset and test longitudinal component
    for mu in range(3):
        vector[mu] = make_data(queue, fft).astype(cdtype)
        fft.idft(vector[mu], vector_x[mu])

    long = cla.zeros_like(minus)
    project.decompose_vector(queue, vector, plus1, minus1, long)

    long_x = cla.empty(queue, pencil_shape, dtype)
    fft.idft(long, long_x)

    div_true = cla.empty(queue, rank_shape, dtype)
    derivs.divergence(queue, vector_x, div_true)

    derivs(queue, fx=long_x, grd=pdx)
    div_long = cla.empty(queue, rank_shape, dtype)
    if h != 0:
        pdx_h = cla.empty(queue, (3, ) + pencil_shape, dtype)
        for mu in range(3):
            mpi.restore_halos(queue, pdx[mu], pdx_h[mu])
        derivs.divergence(queue, pdx_h, div_long)
    else:
        derivs.divergence(queue, pdx, div_long)

    max_err, avg_err = get_errs(div_true.get(), div_long.get())
    assert max_err < 1e-6 and avg_err < 1e-11, \
        f"lap(longitudinal) != div vector for {grid_shape=}, {h=}" \
        f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        ntime = 10
        t = timer(lambda: project.transversify(queue, vector), ntime=ntime)
        print(f"transversify took {t:.3f} ms for {grid_shape=}")
        t = timer(lambda: project.pol_to_vec(queue, plus, minus, vector),
                  ntime=ntime)
        print(f"pol_to_vec took {t:.3f} ms for {grid_shape=}")
        t = timer(lambda: project.vec_to_pol(queue, plus, minus, vector),
                  ntime=ntime)
        print(f"vec_to_pol took {t:.3f} ms for {grid_shape=}")
        t = timer(
            lambda: project.decompose_vector(queue, vector, plus, minus, long),
            ntime=ntime)
        print(f"decompose_vector took {t:.3f} ms for {grid_shape=}")
Code example #22
mphi = 1.20e-6 * mpl
mchi = 0.
gsq = 2.5e-7
sigma = 0.
lambda4 = 0.
f0 = [.193 * mpl, 0]  # units of mpl
df0 = [-.142231 * mpl, 0]  # units of mpl
end_time = 1
end_scale_factor = 20
Stepper = ps.LowStorageRK54
gravitational_waves = True  # whether to simulate gravitational waves

ctx = ps.choose_device_and_make_context()
queue = cl.CommandQueue(ctx)

decomp = ps.DomainDecomposition(proc_shape, halo_shape, rank_shape)
fft = ps.DFT(decomp, ctx, queue, grid_shape, dtype)
if halo_shape == 0:
    derivs = ps.SpectralCollocator(fft, dk)
else:
    derivs = ps.FiniteDifferencer(decomp,
                                  halo_shape,
                                  dx,
                                  rank_shape=rank_shape)


def potential(f):
    phi, chi = f[0], f[1]
    unscaled = (mphi**2 / 2 * phi**2 + mchi**2 / 2 * chi**2 +
                gsq / 2 * phi**2 * chi**2 + sigma / 2 * phi * chi**2 +
                lambda4 / 4 * chi**4)