def get_context_from_string(ctxstr):
    """Build an xobjects context from a specification string.

    Accepted formats:
        None                      -> default ContextCpu
        "ContextCpu"              -> ContextCpu()
        "ContextCpu:4"            -> ContextCpu(omp_num_threads=4)
        "ContextCupy"             -> ContextCupy()
        "ContextCupy:1"           -> ContextCupy(device=1)
        "ContextPyopencl"         -> ContextPyopencl()
        "ContextPyopencl:0.0"     -> ContextPyopencl(device="0.0")

    Raises:
        ValueError: if the context name is not recognized.
    """
    import xobjects as xo

    if ctxstr is None:
        # BUG FIX: original called xo.ContexCPU(), which does not exist
        # (AttributeError); the correct class is xo.ContextCpu.
        return xo.ContextCpu()

    ll = ctxstr.split(":")
    if len(ll) <= 1:
        ctxtype = ll[0]
        option = []
    else:
        ctxtype, options = ctxstr.split(":")
        option = options.split(",")

    if ctxtype == "ContextCpu":
        if len(option) == 0:
            return xo.ContextCpu()
        else:
            return xo.ContextCpu(omp_num_threads=int(option[0]))
    elif ctxtype == "ContextCupy":
        if len(option) == 0:
            return xo.ContextCupy()
        else:
            return xo.ContextCupy(device=int(option[0]))
    elif ctxtype == "ContextPyopencl":
        if len(option) == 0:
            return xo.ContextPyopencl()
        else:
            # Pyopencl device specs are strings (e.g. "0.0"), not ints.
            return xo.ContextPyopencl(device=option[0])
    else:
        raise ValueError(f"Cannot create context from `{ctxstr}`")
def test_ref_union():
    """Typeid resolution for an array of UnionRef elements via the C API."""

    class StructA(xo.Struct):
        fa = xo.Float64

    ArrayB = xo.Float64[6, 6]

    class RefA(xo.UnionRef):
        _reftypes = (StructA, ArrayB)

    ArrNRefA = RefA[:]

    # One data path per reference type.
    assert len(ArrNRefA._gen_data_paths()) == 2
    generated = ArrNRefA._gen_kernels()

    arr = ArrNRefA(3)
    arr[0] = ("StructA", {"fa": 3.0})
    arr[1] = ArrayB()
    arr[1][2, 3] = 4.0

    ctx = xo.ContextCpu()
    ctx.add_kernels(kernels=generated)

    # Slot 0 holds StructA (typeid 0), slot 1 ArrayB (typeid 1),
    # slot 2 was never assigned (-1).
    for slot, expected_tid in ((0, 0), (1, 1), (2, -1)):
        assert ctx.kernels.ArrNRefA_typeid(obj=arr, i0=slot) == expected_tid
def test_kernel_cpu():
    """Compile a small C dot-product kernel on the CPU context and run it."""
    context = xo.ContextCpu()

    src_code = r"""
    double my_mul(const int n, const double* x1, const double* x2) {
        int tid;
        double y =0;
        for (tid=0; tid<n; tid++){
            y+= x1[tid] * x2[tid];
        }
        return y;
    }
    """

    descriptions = {
        "my_mul": xo.Kernel(
            args=[
                xo.Arg(xo.Int32, name="n"),
                xo.Arg(xo.Float64, pointer=True, name="x1"),
                xo.Arg(xo.Float64, pointer=True, name="x2"),
            ],
            ret=xo.Arg(xo.Float64),
        )
    }
    context.add_kernels(sources=[src_code], kernels=descriptions)

    vec_a = np.arange(10.0)
    vec_b = np.arange(10.0)
    result = context.kernels.my_mul(n=len(vec_a), x1=vec_a, x2=vec_b)

    # sum(i*i for i in range(10)) == 285
    assert result == 285.0
def test_static_struct():
    """Field indices, defaults, and read/write access for a static struct."""

    class StructA(xo.Struct):
        a = xo.Field(xo.Float64, default=3.5)
        b = xo.Int8
        c = xo.Field(xo.Int64)

    # Fields are indexed in declaration order.
    # BUG FIX: the first assertion was duplicated in the original.
    assert StructA.a.index == 0
    assert StructA.b.index == 1
    assert StructA.c.index == 2

    for ctx in xo.context.get_test_contexts():
        print(f"Test {ctx}")
        # BUG FIX: the original allocated on xo.ContextCpu() regardless of
        # the loop context, so only the CPU backend was ever exercised.
        s = StructA(_context=ctx)
        assert s._size is not None

        # Defaults: explicit for `a`, zero for the others.
        assert s.a == 3.5
        assert s.b == 0
        assert s.c == 0

        s.a = 5.2
        assert s.a == 5.2
        s.c = 7
        assert s.c == 7
        s.b = -4
        # BUG FIX: the original wrote s.b but never verified the round trip.
        assert s.b == -4
def test_struct1():
    """Generated getters/setters and pointer accessors for a static struct."""

    class Struct1(xo.Struct):
        field1 = xo.Int64
        field2 = xo.Float64

    kernels = Struct1._gen_kernels()
    ctx = xo.ContextCpu()
    ctx.add_kernels(kernels=kernels)

    s1 = Struct1(field1=2, field2=3.0)

    ctx.kernels.Struct1_set_field1(obj=s1, value=7)
    # BUG FIX: these two comparisons were bare expression statements in the
    # original and asserted nothing.
    assert ctx.kernels.Struct1_get_field1(obj=s1) == s1.field1
    ctx.kernels.Struct1_set_field2(obj=s1, value=7)
    assert ctx.kernels.Struct1_get_field2(obj=s1) == s1.field2

    ps = ctx.kernels.Struct1_getp(obj=s1)
    p1 = ctx.kernels.Struct1_getp_field1(obj=s1)
    p2 = ctx.kernels.Struct1_getp_field2(obj=s1)

    # Viewing the struct base pointer as int64[0] / double[1] must expose
    # field1 and field2 respectively (fields are laid out in order).
    assert ffi.cast("uint64_t *", ps)[0] == s1.field1
    assert ffi.cast("double *", ps)[1] == s1.field2
    assert p1[0] == s1.field1
    assert p2[0] == s1.field2
def test_gen_c_api():
    """Generated C getters for a Multipole object (scalar and nested field)."""
    _, Multipole = gen_classes()

    context = xo.ContextCpu()
    context.add_kernels(kernels=Multipole._gen_kernels())

    mult = Multipole(field=10)
    mult.order = 3
    mult.field[2].normal = 1

    assert context.kernels.Multipole_get_order(obj=mult) == 3
    assert context.kernels.Multipole_get_field_normal(obj=mult, i0=2) == 1.0
def get_test_contexts():
    """Yield the contexts selected by the XOBJECTS_TEST_CONTEXTS env var.

    Unset  -> CPU plus any installed GPU backends.
    "all"  -> CPU (serial and OpenMP) plus every available GPU device.
    Other  -> ";"-separated context spec strings (see get_context_from_string).
    """
    import os
    import xobjects as xo

    selection = os.environ.get("XOBJECTS_TEST_CONTEXTS")

    if selection is None:
        yield xo.ContextCpu()
        # yield xo.ContextCpu(omp_num_threads=2)
        if xo.ContextCupy in xo.context.available:
            yield xo.ContextCupy()
        if xo.ContextPyopencl in xo.context.available:
            yield xo.ContextPyopencl()
        return

    if selection == "all":
        yield xo.ContextCpu()
        yield xo.ContextCpu(omp_num_threads=2)
        if xo.ContextCupy in xo.context.available:
            yield xo.ContextCupy()
        if xo.ContextPyopencl in xo.context.available:
            for device in xo.ContextPyopencl.get_devices():
                yield xo.ContextPyopencl(device=device)
        return

    for spec in selection.split(";"):
        yield get_context_from_string(spec)
def test_get_two_indices():
    """Access a doubly-nested array element through a two-index C getter."""

    class Point(xo.Struct):
        x = xo.Float64
        y = xo.Float64

    class Triangle(Point[3]):
        pass

    class Mesh(Triangle[:]):
        pass

    context = xo.ContextCpu()
    context.add_kernels(kernels=Mesh._gen_kernels())

    mesh = Mesh(5)
    mesh[0][1].x = 3

    # Getter takes the triangle index (i0) and the vertex index (i1).
    assert context.kernels.Mesh_get_x(obj=mesh, i0=0, i1=1) == 3
def test_struct2():
    """len/get/set C kernels for a struct with a dynamic Float64 array."""

    class Struct2(xo.Struct):
        field1 = xo.Int32
        field2 = xo.Float64[:]

    instance = Struct2(field1=2, field2=5)

    context = xo.ContextCpu()
    context.add_kernels(kernels=Struct2._gen_kernels())

    length = len(instance.field2)
    assert context.kernels.Struct2_len_field2(obj=instance) == length

    for idx in range(length):
        # Write from Python, read back through the C getter...
        instance.field2[idx] = idx * 3
        assert context.kernels.Struct2_get_field2(obj=instance, i0=idx) == idx * 3
        # ...then write through the C setter and read back from Python.
        context.kernels.Struct2_set_field2(obj=instance, i0=idx, value=idx * 4)
        assert instance.field2[idx] == idx * 4
def test_array2():
    """len/get/set C kernels for a free-standing dynamic Int64 array."""
    Array2 = xo.Int64[:]

    context = xo.ContextCpu()
    context.add_kernels(kernels=Array2._gen_kernels())

    initial = [2, 7, 3]
    arr = Array2(initial)
    assert context.kernels.ArrNInt64_len(obj=arr) == len(initial)

    for idx in range(len(initial)):
        # Python write / C read, then C write / Python read.
        arr[idx] = idx * 3
        assert context.kernels.ArrNInt64_get(obj=arr, i0=idx) == idx * 3
        context.kernels.ArrNInt64_set(obj=arr, i0=idx, value=idx * 4)
        assert arr[idx] == idx * 4
def test_capi_call():
    """Call a generated C getter on a struct holding dynamic Int64 arrays."""

    class ParticlesData(xo.Struct):
        s = xo.Int64[:]
        x = xo.Int64[:]
        y = xo.Int64[:]

    context = xo.ContextCpu()
    context.add_kernels(kernels=ParticlesData._gen_kernels())

    # Two elements per field: 10 and 20.
    particles = ParticlesData(
        s=np.arange(10, 21, 10),
        x=np.arange(10, 21, 10),
        y=np.arange(10, 21, 10),
    )

    value_from_c = context.kernels.ParticlesData_get_x(obj=particles, i0=1)
    assert value_from_c == particles.x[1]
def test_ref():
    """Data paths and C getters for a struct containing a Ref field."""
    context = xo.ContextCpu()

    class StructA(xo.Struct):
        fa = xo.Float64
        sb = xo.Ref[xo.Int64[:]]

    # Scalar, ref, ref target, and ref target element paths.
    assert len(StructA._gen_data_paths()) == 4
    context.add_kernels(kernels=StructA._gen_kernels())

    obj = StructA(fa=2.3)
    obj.sb = [1, 2, 3]

    assert context.kernels.StructA_get_fa(obj=obj) == obj.fa
    assert context.kernels.StructA_get_sb(obj=obj, i0=2) == obj.sb[2]
def test_2_particles():
    """Read array fields of a two-particle struct through the C API."""
    context = xo.ContextCpu()

    class ParticlesData(xo.Struct):
        num_particles = xo.Int64
        s = xo.Float64[:]
        x = xo.Float64[:]

    particles = ParticlesData(
        num_particles=2,
        s=np.array([1, 2]),
        x=np.array([7, 8]),
    )

    context.add_kernels(kernels=ParticlesData._gen_kernels())

    # Dump the raw buffer address (debugging aid only).
    ptr = particles._buffer.to_nplike(0, "int64", (11,)).ctypes.data
    print(f"{ptr:x}")

    for idx in (0, 1):
        from_c = context.kernels.ParticlesData_get_x(obj=particles, i0=idx)
        assert from_c == particles.x[idx]
def test_unionref():
    """UnionRef array: typeid lookup and member-pointer consistency."""

    class Struct1(xo.Struct):
        field1 = xo.Int64
        field2 = xo.Float64

    class Struct2(xo.Struct):
        field1 = xo.Int32
        field2 = xo.Float64[:]

    class URef(xo.UnionRef):
        _reftypes = [Struct1, Struct2]

    ArrNURef = URef[:]

    arr = ArrNURef(3)
    arr[0] = Struct1(field1=3, field2=4)
    arr[1] = Struct2(field1=2, field2=[5, 7])
    arr[2] = None

    kernels = ArrNURef._gen_kernels()
    kernels.update(Struct1._gen_kernels())
    kernels.update(Struct2._gen_kernels())
    ctx = xo.ContextCpu()
    ctx.add_kernels(kernels=kernels)

    # BUG FIX: these three comparisons were bare expression statements in
    # the original and asserted nothing.
    assert ctx.kernels.ArrNURef_typeid(obj=arr, i0=0) == URef._typeid_from_type(
        type(arr[0]))
    assert ctx.kernels.ArrNURef_typeid(obj=arr, i0=1) == URef._typeid_from_type(
        type(arr[1]))
    # Unassigned slot reports -1.
    assert ctx.kernels.ArrNURef_typeid(obj=arr, i0=2) == -1

    # The member pointer reached through the array must equal the pointer
    # obtained directly from the referenced struct.
    p1 = ctx.kernels.Struct1_getp(obj=arr[0])
    p2 = ctx.kernels.ArrNURef_member(obj=arr, i0=0)
    assert int(ffi.cast("size_t", p1)) == int(ffi.cast("size_t", p2))
import xobjects as xo from xfields import _pkg_root import time import numpy as np import matplotlib.pyplot as plt from scipy.special import wofz mode = 'special_y_0' mode = 'standard' ctx = xo.ContextCpu(omp_num_threads=0) src_code = """ /*gpukern*/ void eval_cerrf_q1( const int n, /*gpuglmem*/ double const* /*restrict*/ re, /*gpuglmem*/ double const* /*restrict*/ im, /*gpuglmem*/ double* /*restrict*/ wz_re, /*gpuglmem*/ double* /*restrict*/ wz_im ) { int tid = 0; for( ; tid < n ; ++tid ) { //autovectorized if( tid < n ) { double const x = re[ tid ]; double const y = im[ tid ]; double wz_x, wz_y; cerrf( x, y, &wz_x, &wz_y );
def test_cerrf_q1():
    """Validate the cerrf_q1 first-quadrant Faddeeva function w(z) against
    scipy.special.wofz on a log-spaced grid; absolute error must stay
    below 0.5e-9 in both real and imaginary parts."""
    ctx = xo.ContextCpu(omp_num_threads=2)

    # Log-spaced sample points along the real (xx) and imaginary (yy) axes.
    xx = np.logspace(-8, +8, 51, dtype=np.float64)
    yy = np.logspace(-8, +8, 51, dtype=np.float64)
    n_re = len(xx)
    n_im = len(yy)
    n_z = len(yy) * len(xx)

    re_absc = np.arange(n_z, dtype=np.float64).reshape(n_im, n_re)
    im_absc = np.arange(n_z, dtype=np.float64).reshape(n_im, n_re)
    wz_cmp_re = np.arange(n_z, dtype=np.float64).reshape(n_im, n_re)
    wz_cmp_im = np.arange(n_z, dtype=np.float64).reshape(n_im, n_re)

    # Build the 2D argument grid: row jj has Im(z)=yy[jj], col ii has Re(z)=xx[ii].
    for jj, y in enumerate(yy):
        re_absc[jj, :] = xx[:]
    for ii, x in enumerate(xx):
        im_absc[:, ii] = yy[:]

    # Using scipy's wofz implementation of the Faddeeva method. This is
    # (at the time of this writing in 2021) based on the MIT ab-initio
    # implementation using a combination of Algorithm 680 for large |z| and
    # Algorithm 916 for the remainder of C. It claims a relative accuracy of
    # 1e-13 across the whole of C and is thus suitable to check the accuracy
    # of the cerrf_q1 implementation which has a target accuracy of 10^{-10}
    # in the *absolute* error.
    for jj, y in enumerate(yy):
        for ii, x in enumerate(xx):
            z = x + 1.0j * y
            wz = wofz_scipy(z)
            wz_cmp_re[jj, ii] = wz.real
            wz_cmp_im[jj, ii] = wz.imag

    src_code = """
    /*gpukern*/ void eval_cerrf_q1(
        const int n,
        /*gpuglmem*/ double const* /*restrict*/ re,
        /*gpuglmem*/ double const* /*restrict*/ im,
        /*gpuglmem*/ double* /*restrict*/ wz_re,
        /*gpuglmem*/ double* /*restrict*/ wz_im )
    {
        int tid = 0;
        for( ; tid < n ; ++tid ) { //autovectorized
            if( tid < n ) {
                double const x = re[ tid ];
                double const y = im[ tid ];
                double wz_x, wz_y;

                cerrf_q1( x, y, &wz_x, &wz_y );

                wz_re[ tid ] = wz_x;
                wz_im[ tid ] = wz_y;
            }
        }
    }
    """

    kernel_descriptions = {
        "eval_cerrf_q1": xo.Kernel(
            args=[
                xo.Arg(xo.Int32, name="n"),
                xo.Arg(xo.Float64, name="re", const=True, pointer=True),
                xo.Arg(xo.Float64, name="im", const=True, pointer=True),
                xo.Arg(xo.Float64, name="wz_re", pointer=True),
                xo.Arg(xo.Float64, name="wz_im", pointer=True),
            ],
            n_threads="n",
        ),
    }

    # Headers providing cerrf_q1 and its numerical helpers.
    headers = [
        _pkg_root.joinpath("headers/constants.h"),
        _pkg_root.joinpath("headers/sincos.h"),
        _pkg_root.joinpath("headers/power_n.h"),
        _pkg_root.joinpath(
            "fieldmaps/bigaussian_src/complex_error_function.h"),
    ]

    wz_re = np.arange(n_z, dtype=np.float64)
    wz_im = np.arange(n_z, dtype=np.float64)

    # Move flattened inputs/outputs to the context's memory.
    re_absc_dev = ctx.nparray_to_context_array(re_absc.reshape(n_z))
    im_absc_dev = ctx.nparray_to_context_array(im_absc.reshape(n_z))
    wz_re_dev = ctx.nparray_to_context_array(wz_re)
    wz_im_dev = ctx.nparray_to_context_array(wz_im)

    ctx.add_kernels(sources=[src_code], kernels=kernel_descriptions,
                    extra_headers=headers)
    ctx.kernels.eval_cerrf_q1(n=n_z, re=re_absc_dev, im=im_absc_dev,
                              wz_re=wz_re_dev, wz_im=wz_im_dev)

    wz_re = ctx.nparray_from_context_array(wz_re_dev).reshape(n_im, n_re)
    wz_im = ctx.nparray_from_context_array(wz_im_dev).reshape(n_im, n_re)

    d_abs_re = np.fabs(wz_re - wz_cmp_re)
    d_abs_im = np.fabs(wz_im - wz_cmp_im)

    # NOTE: target accuracy of cerrf_q1 is 0.5e-10 but the algorithm does
    # not converge to within target accuracy for all arguments in C,
    # especially close to the real axis. We therefore require that
    # d_abs_re.max(), d_abs_im.max() < 0.5e-9
    assert d_abs_re.max() < 0.5e-9
    assert d_abs_im.max() < 0.5e-9
def test_cerrf_all_quadrants():
    """Validate cerrf (Faddeeva w(z)) on random arguments spanning all four
    quadrants against scipy.special.wofz; absolute error must be < 0.5e-9."""
    x0 = 5.33
    y0 = 4.29
    num_args = 10000

    # CPU-only test: bail out when the CPU context is not available.
    if xo.ContextCpu not in available:
        return

    ctx = xo.ContextCpu(omp_num_threads=2)

    re_max = np.float64(np.sqrt(2.0) * x0)
    im_max = np.float64(np.sqrt(2.0) * y0)

    # Extending the sampled area symmetrically into Q3 and Q4 would
    # get the zeros of w(z) into the fold which are located close to the
    # first medians of these quadrants at Im(z) = \pm Re(z) for Re(z) > 1.99146
    #
    # This would lead to a degradation in the accuracy by at least an order
    # of magnitude due to cancellation effects and could distort the test ->
    # By excluding anything with an imaginary part < -1.95, this should be on
    # the safe side.
    np.random.seed(20210811)
    im_min = np.float64(-1.95)
    re_min = -re_max

    re_absc = np.random.uniform(re_min, re_max, num_args)
    im_absc = np.random.uniform(im_min, im_max, num_args)
    wz_cmp_re = np.arange(num_args, dtype=np.float64)
    wz_cmp_im = np.arange(num_args, dtype=np.float64)

    # Create comparison data for verifying the correctness of cerrf().
    # Cf. the comments about scipy's wofz implementation in test_cerrf_q1()
    # for details!
    for ii, (x, y) in enumerate(zip(re_absc, im_absc)):
        wz = wofz_scipy(x + 1.0j * y)
        wz_cmp_re[ii] = wz.real
        wz_cmp_im[ii] = wz.imag

    src_code = """
    /*gpukern*/ void eval_cerrf_all_quadrants(
        const int n,
        /*gpuglmem*/ double const* /*restrict*/ re,
        /*gpuglmem*/ double const* /*restrict*/ im,
        /*gpuglmem*/ double* /*restrict*/ wz_re,
        /*gpuglmem*/ double* /*restrict*/ wz_im )
    {
        int tid = 0;
        for( ; tid < n ; ++tid ) { //autovectorized
            if( tid < n ) {
                double const x = re[ tid ];
                double const y = im[ tid ];
                double wz_x, wz_y;

                cerrf( x, y, &wz_x, &wz_y );

                wz_re[ tid ] = wz_x;
                wz_im[ tid ] = wz_y;
            }
        }
    }
    """

    kernel_descriptions = {
        "eval_cerrf_all_quadrants": xo.Kernel(
            args=[
                xo.Arg(xo.Int32, name="n"),
                xo.Arg(xo.Float64, name="re", const=True, pointer=True),
                xo.Arg(xo.Float64, name="im", const=True, pointer=True),
                xo.Arg(xo.Float64, name="wz_re", pointer=True),
                xo.Arg(xo.Float64, name="wz_im", pointer=True),
            ],
            n_threads="n",
        ),
    }

    # Headers providing cerrf and its numerical helpers.
    headers = [
        _pkg_root.joinpath("headers/constants.h"),
        _pkg_root.joinpath("headers/sincos.h"),
        _pkg_root.joinpath("headers/power_n.h"),
        _pkg_root.joinpath(
            "fieldmaps/bigaussian_src/complex_error_function.h"),
    ]

    wz_re = np.arange(num_args, dtype=np.float64)
    wz_im = np.arange(num_args, dtype=np.float64)

    re_absc_dev = ctx.nparray_to_context_array(re_absc)
    im_absc_dev = ctx.nparray_to_context_array(im_absc)
    wz_re_dev = ctx.nparray_to_context_array(wz_re)
    wz_im_dev = ctx.nparray_to_context_array(wz_im)

    ctx.add_kernels(sources=[src_code], kernels=kernel_descriptions,
                    extra_headers=headers)
    ctx.kernels.eval_cerrf_all_quadrants(
        n=num_args,
        re=re_absc_dev,
        im=im_absc_dev,
        wz_re=wz_re_dev,
        wz_im=wz_im_dev,
    )

    wz_re = ctx.nparray_from_context_array(wz_re_dev)
    wz_im = ctx.nparray_from_context_array(wz_im_dev)

    d_abs_re = np.fabs(wz_re - wz_cmp_re)
    d_abs_im = np.fabs(wz_im - wz_cmp_im)

    assert d_abs_re.max() < 0.5e-9
    assert d_abs_im.max() < 0.5e-9
def test_tricubic_interpolation():
    """Tricubic field-map interpolation check: fill the Taylor coefficients
    of an analytic polynomial potential on a grid, track particles through
    the resulting ElectronCloud, and compare the kicks with the analytic
    derivatives of the potential."""
    for context in xo.context.get_test_contexts():
        print(f"Test {context.__class__}")

        scale = 0.05
        # Polynomial test potential f(x,y,z) = scale * sum x^i y^j z^k for
        # i,j,k in 0..3, plus its first and mixed partial derivatives.
        ff = lambda x, y, z: sum(
            [scale * x**i * y**j * z**k
             for i in range(4) for j in range(4) for k in range(4)])
        dfdx = lambda x, y, z: sum(
            [i * scale * x**(i - 1) * y**j * z**k
             for i in range(1, 4) for j in range(4) for k in range(4)])
        dfdy = lambda x, y, z: sum(
            [j * scale * x**i * y**(j - 1) * z**k
             for i in range(4) for j in range(1, 4) for k in range(4)])
        dfdz = lambda x, y, z: sum(
            [k * scale * x**i * y**j * z**(k - 1)
             for i in range(4) for j in range(4) for k in range(1, 4)])
        dfdxy = lambda x, y, z: sum(
            [i * j * scale * x**(i - 1) * y**(j - 1) * z**k
             for i in range(1, 4) for j in range(1, 4) for k in range(4)])
        dfdxz = lambda x, y, z: sum(
            [i * k * scale * x**(i - 1) * y**j * z**(k - 1)
             for i in range(1, 4) for j in range(4) for k in range(1, 4)])
        dfdyz = lambda x, y, z: sum(
            [j * k * scale * x**i * y**(j - 1) * z**(k - 1)
             for i in range(4) for j in range(1, 4) for k in range(1, 4)])
        dfdxyz = lambda x, y, z: sum(
            [i * j * k * scale * x**(i - 1) * y**(j - 1) * z**(k - 1)
             for i in range(1, 4) for j in range(1, 4) for k in range(1, 4)])

        NN = 21
        x_grid = np.linspace(-0.5, 0.5, NN)
        y_grid = np.linspace(-0.5, 0.5, NN)
        z_grid = np.linspace(-0.5, 0.5, NN)

        fieldmap = xf.TriCubicInterpolatedFieldMap(
            _context=context, x_grid=x_grid, y_grid=y_grid, z_grid=z_grid)
        ecloud = xf.ElectronCloud(length=1, fieldmap=fieldmap,
                                  _buffer=fieldmap._buffer)

        x0 = fieldmap.x_grid[0]
        y0 = fieldmap.y_grid[0]
        z0 = fieldmap.z_grid[0]
        dx = fieldmap.dx
        dy = fieldmap.dy
        dz = fieldmap.dz
        nx = fieldmap.nx
        ny = fieldmap.ny

        # Each grid node stores 8 Taylor coefficients in order:
        # f, f_x*dx, f_y*dy, f_z*dz, f_xy*dx*dy, f_xz*dx*dz, f_yz*dy*dz,
        # f_xyz*dx*dy*dz (derivatives scaled by the grid steps).
        for ix in range(NN):
            for iy in range(NN):
                for iz in range(NN):
                    index = 0 + 8 * ix + 8 * nx * iy + 8 * nx * ny * iz
                    fieldmap._phi_taylor[index + 0] = ff(
                        x_grid[ix], y_grid[iy], z_grid[iz])
                    fieldmap._phi_taylor[index + 1] = dfdx(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dx
                    fieldmap._phi_taylor[index + 2] = dfdy(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dy
                    fieldmap._phi_taylor[index + 3] = dfdz(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dz
                    fieldmap._phi_taylor[index + 4] = dfdxy(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dx * dy
                    fieldmap._phi_taylor[index + 5] = dfdxz(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dx * dz
                    fieldmap._phi_taylor[index + 6] = dfdyz(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dy * dz
                    fieldmap._phi_taylor[index + 7] = dfdxyz(
                        x_grid[ix], y_grid[iy], z_grid[iz]) * dx * dy * dz

        # Random test particles; some fall outside the grid and are flagged
        # lost (state == -11) during tracking.
        n_parts = 1000
        rng = default_rng(12345)
        x_test = rng.random(n_parts) * 1.2 - 0.6
        y_test = rng.random(n_parts) * 1.2 - 0.6
        tau_test = rng.random(n_parts) * 1.2 - 0.6

        p0c = 450e9
        testp0 = xp.Particles(p0c=p0c)
        beta0 = testp0.beta0
        part = xp.Particles(_context=context, x=x_test, y=y_test,
                            zeta=beta0 * tau_test, p0c=p0c)

        ecloud.track(part)
        part.move(_context=xo.ContextCpu())

        # Compare kicks only for particles that stayed inside the grid.
        mask_p = part.state != -11
        true_px = np.array([
            -dfdx(xx, yy, zz) for xx, yy, zz in zip(
                part.x[mask_p], part.y[mask_p],
                part.zeta[mask_p] / part.beta0[mask_p])])
        true_py = np.array([
            -dfdy(xx, yy, zz) for xx, yy, zz in zip(
                part.x[mask_p], part.y[mask_p],
                part.zeta[mask_p] / part.beta0[mask_p])])
        true_ptau = np.array([
            -dfdz(xx, yy, zz) for xx, yy, zz in zip(
                part.x[mask_p], part.y[mask_p],
                part.zeta[mask_p] / part.beta0[mask_p])])

        assert np.allclose(part.px[mask_p], true_px, atol=1.e-13, rtol=1.e-13)
        assert np.allclose(part.py[mask_p], true_py, atol=1.e-13, rtol=1.e-13)
        assert np.allclose(part.ptau[mask_p], true_ptau,
                           atol=1.e-13, rtol=1.e-13)
# copyright ################################# # # This file is part of the Xfields Package. # # Copyright (c) CERN, 2021. # # ########################################### # import numpy as np from scipy.constants import e as qe import xobjects as xo import xtrack as xt import xfields as xf import xpart as xp import ducktrack as dtk context = xo.ContextCpu() # crossing plane alpha = 0.7 # crossing angle phi = 0.8 # separations x_bb_co = 5e-3 y_bb_co = -4e-3 charge_slices = np.array([1e16, 2e16, 5e16]) z_slices = np.array([-60., 2., 55.]) # Single particle properties q_part = qe
def test_beambeam3d_old_interface():
    """Build a BeamBeamBiGaussian3D element from the legacy (ducktrack)
    interface and check tracking against the ducktrack reference element,
    coordinate by coordinate, for several sigma-matrix configurations."""
    for context in xo.context.get_test_contexts():
        print(repr(context))

        # crossing plane
        alpha = 0.7

        # crossing angle
        phi = 0.8

        # separations
        x_bb_co = 5e-3
        y_bb_co = -4e-3
        charge_slices = np.array([1e16, 2e16, 5e16])
        z_slices = np.array([-6., 0.2, 5.5])

        # Closed-orbit coordinates and dipolar kick corrections.
        x_co = 2e-3
        px_co = 1e-6
        y_co = -3e-3
        py_co = -2e-6
        zeta_co = 0.01
        delta_co = 1.2e-3

        d_x = 1.5e-3
        d_px = 1.6e-6
        d_y = -1.7e-3
        d_py = -1.8e-6
        d_zeta = 0.019
        d_delta = 3e-4

        for ss in sigma_configurations():

            (Sig_11_0, Sig_12_0, Sig_13_0, Sig_14_0, Sig_22_0, Sig_23_0,
             Sig_24_0, Sig_33_0, Sig_34_0, Sig_44_0) = ss

            # Broadcast each sigma to one value per slice.
            Sig_11_0 = Sig_11_0 + np.zeros_like(charge_slices)
            Sig_12_0 = Sig_12_0 + np.zeros_like(charge_slices)
            Sig_13_0 = Sig_13_0 + np.zeros_like(charge_slices)
            Sig_14_0 = Sig_14_0 + np.zeros_like(charge_slices)
            Sig_22_0 = Sig_22_0 + np.zeros_like(charge_slices)
            Sig_23_0 = Sig_23_0 + np.zeros_like(charge_slices)
            Sig_24_0 = Sig_24_0 + np.zeros_like(charge_slices)
            Sig_33_0 = Sig_33_0 + np.zeros_like(charge_slices)
            Sig_34_0 = Sig_34_0 + np.zeros_like(charge_slices)
            Sig_44_0 = Sig_44_0 + np.zeros_like(charge_slices)

            print('------------------------')
            print(ss)

            # Reference element (ducktrack implementation).
            bb_dtk = dtk.elements.BeamBeam6D(
                phi=phi, alpha=alpha,
                x_bb_co=x_bb_co,
                y_bb_co=y_bb_co,
                charge_slices=charge_slices,
                zeta_slices=z_slices,
                sigma_11=Sig_11_0[0],
                sigma_12=Sig_12_0[0],
                sigma_13=Sig_13_0[0],
                sigma_14=Sig_14_0[0],
                sigma_22=Sig_22_0[0],
                sigma_23=Sig_23_0[0],
                sigma_24=Sig_24_0[0],
                sigma_33=Sig_33_0[0],
                sigma_34=Sig_34_0[0],
                sigma_44=Sig_44_0[0],
                x_co=x_co,
                px_co=px_co,
                y_co=y_co,
                py_co=py_co,
                zeta_co=zeta_co,
                delta_co=delta_co,
                d_x=d_x,
                d_px=d_px,
                d_y=d_y,
                d_py=d_py,
                d_zeta=d_zeta,
                d_delta=d_delta)

            # Element under test, built from the legacy interface dict.
            bb = xf.BeamBeamBiGaussian3D(old_interface=bb_dtk.to_dict(),
                                         _context=context)

            dtk_part = dtk.TestParticles(
                p0c=6500e9,
                x=-1.23e-3,
                px=50e-3,
                y=2e-3,
                py=27e-3,
                sigma=3.,
                delta=2e-4)

            part = xp.Particles(_context=context, **dtk_part.to_dict())

            bb.track(part)
            bb_dtk.track(dtk_part)

            part.move(_context=xo.ContextCpu())
            # Compare every tracked coordinate against the reference.
            for cc in 'x px y py zeta delta'.split():
                val_test = getattr(part, cc)[0]
                val_ref = getattr(dtk_part, cc)
                print('')
                print(f'ducktrack: {cc} = {val_ref:.12e}')
                print(f'xsuite: {cc} = {val_test:.12e}')
                assert np.isclose(val_test, val_ref, rtol=0, atol=5e-12)
import xobjects as xo class StructA(xo.Struct): fa = xo.Float64 fb = xo.Int64[:] class StructB(xo.Struct): fa = xo.Float64 fb = xo.Ref[xo.Int64[:]] ctx = xo.ContextCpu() sa = StructA(fa=3, fb=4, _context=ctx) sb = StructB(fa=3, _context=ctx) sb.fb = [1, 2, 3, 4] sa.fb[3] = 1 sb.fb[3] = 1 ks = { "ka": xo.Kernel([xo.Arg(StructA, name="obj")]), "kb": xo.Kernel([xo.Arg(StructB, name="obj")]), } source = """ void ka(StructA obj){ printf("hello\\n"); printf("fa=%g\\n", StructA_get_fa(obj));