def remove_dipolar_kicks(self):
    """Measure the net dipolar kick of a TriCub element on a zero-amplitude
    probe particle and store it on every TriCub so it can be compensated.

    A temporary one-drift + one-TriCub lattice is tracked for a single turn
    with a particle launched exactly on the reference orbit; the resulting
    momenta are the dipolar kicks.
    """
    # Minimal temporary lattice: one drift followed by one TriCub.
    temp_lattice = sixtracklib.Elements()
    # BUGFIX: the original line read "self.elements sixtracklib.Drift(...)",
    # which is not valid Python; the drift only needs to be appended to the
    # temporary lattice's cbuffer.
    sixtracklib.Drift(cbuffer=temp_lattice.cbuffer)
    temp_tc_index = temp_lattice.cbuffer.n_objects
    temp_tc = sixtracklib.TriCub(cbuffer=temp_lattice.cbuffer)

    # Copy the length of the first configured e-cloud; all shifts and kick
    # compensations are zeroed so the raw kick is measured.
    first_ecloud = list(self.tricubs.keys())[0]
    temp_tc.length = self.tricubs[first_ecloud].length
    temp_tc.x_shift = 0.
    temp_tc.y_shift = 0.
    temp_tc.tau_shift = 0.
    temp_tc.dipolar_kick_px = 0.
    temp_tc.dipolar_kick_py = 0.
    temp_tc.dipolar_kick_ptau = 0.

    # Single probe particle with all coordinates zero.
    temp_ps = particles_set = sixtracklib.ParticlesSet()
    particles = particles_set.Particles(num_particles=1)

    temp_part = pysixtrack.Particles(p0c=self.partCO.p0c)
    temp_part.x = 0
    temp_part.px = 0
    temp_part.y = 0
    temp_part.py = 0
    temp_part.tau = 0
    temp_part.ptau = 0
    temp_part.state = 1
    temp_part.partid = 0
    temp_part.elemid = 0
    temp_part.turn = 0
    particles.from_pysixtrack(temp_part, 0)

    # CPU track job on the temporary lattice with the first pinch dataset
    # assigned to the temporary TriCub.
    temp_job = sixtracklib.TrackJob(temp_lattice, temp_ps, device=None)
    temp_tricub_data_buffer_id = temp_job.add_stored_buffer(
        cbuffer=self.tricub_data_buffer)
    first_tricub_data = list(self.tricub_data.keys())[0]
    sixtracklib.TriCub_buffer_create_assign_address_item(
        temp_job, temp_tc_index, temp_tricub_data_buffer_id,
        self.tricub_data_indices[first_tricub_data])
    temp_job.commit_address_assignments()
    temp_job.assign_all_addresses()

    temp_job.track_until(1)

    # The probe started at zero, so its final momenta are the dipolar kicks.
    dipolar_kick_px = particles.px[0]
    dipolar_kick_py = particles.py[0]
    dipolar_kick_ptau = particles.ptau[0]
    print(dipolar_kick_px, dipolar_kick_py, dipolar_kick_ptau)
    #dipolar_kick_px = 0.* particles.px[0]
    #dipolar_kick_py = 0.*particles.py[0]
    #dipolar_kick_ptau = 0.*particles.ptau[0]

    # Write the measured kicks to every TriCub in the real lattice and push
    # the updated beam elements to the device.
    for tc in self.tricubs.keys():
        tc_index = self.tricub_indices[tc]
        tricub = self.job.beam_elements_buffer.get_object(tc_index)
        tricub.dipolar_kick_px = dipolar_kick_px
        tricub.dipolar_kick_py = dipolar_kick_py
        tricub.dipolar_kick_ptau = dipolar_kick_ptau
    self.job.push_beam_elements()
def setup_sixtracklib(self, pysixtrack_elements, pyht_beam):
    """Build a SixTrackLib track job from a pysixtrack line and a
    PyHEADTAIL beam.

    A turn-by-turn BeamMonitor is appended to the lattice and the
    PyHEADTAIL coordinates are copied into the SixTrackLib particle set,
    including the derived quantities rpp, psigma and rvv.
    """
    lattice = pystlib.Elements.from_line(pysixtrack_elements)
    lattice.BeamMonitor(num_stores=self.nturns)

    pset = pystlib.Particles.from_ref(self.npart,
                                      p0c=self.p0c,
                                      mass0=self.A * nmass * 1e9,
                                      q0=self.Q)

    # Copy transverse and longitudinal coordinates from the PyHEADTAIL beam.
    pset.x[:] = pyht_beam.x
    pset.px[:] = pyht_beam.xp
    pset.y[:] = pyht_beam.y
    pset.py[:] = pyht_beam.yp
    pset.zeta[:] = pyht_beam.z
    pset.delta[:] = pyht_beam.dp
    pset.rpp[:] = 1. / (pyht_beam.dp + 1)

    # Derived energy quantities for each particle.
    rest_energy = self.mass * c**2
    rest_energy_sq = rest_energy**2
    ref_energy = np.sqrt((self.p0 * c)**2 + rest_energy_sq)
    momentum = self.p0 * (1 + pyht_beam.dp)
    energy = np.sqrt((momentum * c)**2 + rest_energy_sq)
    pset.psigma[:] = (energy - ref_energy) / (self.beta * self.p0 * c)

    gamma_part = energy / rest_energy
    beta_part = np.sqrt(1 - 1. / (gamma_part * gamma_part))
    pset.rvv[:] = beta_part / self.beta

    ### prepare trackjob in SixTrackLib
    return pystlib.TrackJob(lattice, pset)
def track_particles(x, px, y, py, n_turns, opencl=True):
    """Wrap Sixtracklib and track the particles requested

    Parameters
    ----------
    x : ndarray
        initial conditions
    px : ndarray
        initial conditions
    y : ndarray
        initial conditions
    py : ndarray
        initial conditions
    n_turns : unsigned int
        number of turns to perform
    opencl : bool (optional)
        use opencl backend (default: True)

    Returns
    -------
    particles object
        Sixtracklib particles object
    """
    # All coordinate arrays must describe the same number of particles.
    assert len(x) == len(px)
    assert len(x) == len(py)
    assert len(x) == len(y)

    pset = st.Particles.from_ref(num_particles=len(x), p0c=6.5e12)
    pset.x += x
    pset.px += px
    pset.y += y
    pset.py += py

    elems = st.Elements.fromfile(
        os.path.join(os.path.dirname(__file__), 'data/beam_elements.bin'))

    if opencl:
        job = st.TrackJob(elems, pset, device="opencl:0.0")
    else:
        job = st.TrackJob(elems, pset)

    job.track_until(n_turns)
    job.collect_particles()
    return pset
def __init__(self, line, eclouds_info, particles_set, device=None):
    """Build the SixTrackLib lattice, replacing named e-cloud elements by
    TriCub elements, and create the track job.

    Parameters
    ----------
    line : pysixtrack line
        machine description; cleaned via self.clean_line before conversion
    eclouds_info : dict
        per-ecloud settings keyed by element name
        ('length', 'x_CO', 'y_CO', 'tau_CO')
    particles_set : sixtracklib.ParticlesSet
        particles to be tracked
    device : str or None
        SixTrackLib device string (None -> CPU)
    """
    # BUGFIX: self.tricub_data_buffer was initialized twice in the original
    # code; a single CBuffer is kept.
    self.tricub_data_buffer = cobjects.CBuffer()
    self.tricub_data = {}
    self.tricub_data_indices = {}
    self.tricubs = {}
    self.tricub_indices = {}
    self.tricub_data_buffer_ids = {}
    self.elements = sixtracklib.Elements()
    self.eclouds_info = eclouds_info

    # Bookkeeping lists for tune analysis results.
    self.tune_is_valid_list = []
    self.turn_q_list = []
    self.q1_list = []
    self.q2_list = []
    self.qx_list = []
    self.qy_list = []

    self.n_particles = len(particles_set.particles[0].particle_id)

    ecloud_list = eclouds_info['length'].keys()

    print(
        f'Number of elements in line before cleaning: {len(line.elements)}'
    )
    self.clean_line(line, ecloud_list)
    print(
        f'Number of elements in line after cleaning: {len(line.elements)}')

    # Convert the line element by element; e-cloud markers become TriCub
    # elements, everything else is re-created via its type name.
    for element, element_name in zip(line.elements, line.element_names):
        element_type = element.__class__.__name__
        if element_name in ecloud_list:
            tc_index = self.elements.cbuffer.n_objects
            tc = sixtracklib.TriCub(cbuffer=self.elements.cbuffer)
            tc.x_shift = self.eclouds_info['x_CO'][element_name]
            tc.y_shift = self.eclouds_info['y_CO'][element_name]
            tc.tau_shift = self.eclouds_info['tau_CO'][element_name]
            tc.length = self.eclouds_info['length'][element_name]
            self.tricubs[element_name] = tc
            self.tricub_indices[element_name] = tc_index
        else:
            getattr(self.elements,
                    element_type)(**element.to_dict(keepextra=True))

    self.job = sixtracklib.TrackJob(self.elements, particles_set,
                                    device=device)
def __init__(self, x, px, y, py, opencl=False):
    """
    Parameters
    ----------
    x : ndarray
        initial conditions
    px : ndarray
        initial conditions
    y : ndarray
        initial conditions
    py : ndarray
        initial conditions
    opencl : bool (optional)
        use opencl backend (default: False)

    Returns
    -------
    particles object
        Sixtracklib particles object
    """
    # All coordinate arrays must describe the same number of particles.
    assert len(x) == len(px)
    assert len(x) == len(py)
    assert len(x) == len(y)

    self.particles = st.Particles.from_ref(num_particles=len(x),
                                           p0c=6.5e12)
    self.particles.x += x
    self.particles.px += px
    self.particles.y += y
    self.particles.py += py

    elems = st.Elements.fromfile(
        os.path.join(os.path.dirname(__file__), 'data/beam_elements.bin'))

    if opencl:
        self.cl_job = st.TrackJob(elems, self.particles,
                                  device="opencl:0.0")
    else:
        self.cl_job = st.TrackJob(elems, self.particles)
def setup_sixtracklib_fft(self, pysixtrack_elements):
    """Prepare a SixTrackLib track job for tune (FFT) studies.

    Particles are launched on a small linear ramp of transverse offsets
    in both x and y; a BeamMonitor records every turn.
    """
    lattice = pystlib.Elements.from_line(pysixtrack_elements)
    lattice.BeamMonitor(num_stores=self.nturns)

    pset = pystlib.Particles.from_ref(self.npart, p0c=self.p0c)
    offsets = np.linspace(0, 1e-6, self.npart)
    pset.x += offsets
    pset.y += offsets

    return pystlib.TrackJob(lattice, pset)
def trackWithSTL(self, beam, nTurns, outputAtBPM=True, finalPhaseSpace=False):
    """Track `beam` for `nTurns` with SixTrackLib and return the recorded
    phase-space coordinates as a torch tensor.

    The nine recorded dimensions per BPM are
    x, px, y, py, sigma, psigma, delta, 1/(1+delta), 1/rvv.
    The final tensor layout follows the trailing comments below
    (particle, dim, bpm).
    """
    # track with BPMs
    elements = self.sixTrackLib(nTurns, installBPMs=outputAtBPM,
                                finalPhaseSpace=finalPhaseSpace)
    particles = beam.sixTrackLibParticles()

    jobBPM = stl.TrackJob(elements, particles, device=None)
    jobBPM.track_until(nTurns)
    jobBPM.collect()

    # bring tracking results into same shape as model output
    spatial = list()
    for bpm in jobBPM.output.particles:
        # each monitor buffer is flat; reshape to (turn, particle)
        x = bpm.x.reshape(-1, len(beam.bunch))
        px = bpm.px.reshape(-1, len(beam.bunch))
        y = bpm.y.reshape(-1, len(beam.bunch))
        py = bpm.py.reshape(-1, len(beam.bunch))
        sigma = bpm.zeta.reshape(-1, len(
            beam.bunch))  # double check if zeta really is sigma
        psigma = bpm.psigma.reshape(-1, len(beam.bunch))
        delta = bpm.delta.reshape(-1, len(beam.bunch))
        invDelta = bpm.rpp.reshape(-1, len(beam.bunch))
        velocityRatio = 1 / bpm.rvv.reshape(-1, len(beam.bunch))

        spatialCoordinates = np.stack(
            [x, px, y, py, sigma, psigma, delta, invDelta, velocityRatio])
        spatial.append(spatialCoordinates)  # (dim, turn, particle)

    spatial = np.stack(spatial)  # bpm, dim, turn, particle

    # interleave turns: concatenate per-turn slices along the bpm axis
    output = [spatial[:, :, i, :] for i in range(spatial.shape[2])]
    output = np.concatenate(output)  # bpm, dim, particle

    return torch.as_tensor(np.transpose(output, (2, 1, 0)),
                           )  # particle, dim, bpm
# directory to save the final distribution parts_distribution_dict = {'x': [], 'px':[], 'y': [], 'py': [], 'sigma': [], 'delta': []} # directory to save the tbt emittances tbt_dict = {'turn':[], 'time':[], 'intensity':[], 'neps_x':[], 'neps_y':[], 'std_sigma':[]} time_cum = 0 if pp.track_with == 'sixtracklib': ps = sixtracklib.ParticlesSet().fromfile('input/sixtracklib.particles') t_start = datetime.datetime.now() print('%s: start tracking %d turns'%(str(t_start)[:-7], pp.n_turns_max)) if args.device is None: job = sixtracklib.TrackJob(elements, ps) else: job = sixtracklib.TrackJob(elements, ps, device=args.device) # Collect elements job.collect() cravity1_id = line.element_names.index('cravity.1') cravity1 = job.beam_elements_buffer.get_object(cravity1_id) assert cravity1 is not None cravity2_id = line.element_names.index('cravity.2') cravity2 = job.beam_elements_buffer.get_object(cravity2_id) assert cravity2 is not None
    # NOTE(review): this chunk starts inside an if-branch whose condition is
    # outside the visible source; indentation reconstructed accordingly.
    ecloud_lattice.set_optics_CO(optics, partCO)
    ecloud_lattice.add_tricub_data(pinch_path, 'drift', max_z=max_tau)
    ecloud_lattice.remove_dipolar_kicks()

    # every e-cloud uses the same 'drift' pinch dataset
    tricub_to_tricub_data = {}
    for key in eclouds_info['length'].keys():
        tricub_to_tricub_data[key] = 'drift'
    ecloud_lattice.finalize_assignments(tricub_to_tricub_data)
    job = ecloud_lattice.job
else:
    # no e-clouds: simplify the line and build a plain track job
    line.remove_inactive_multipoles(inplace=True)
    line.remove_zero_length_drifts(inplace=True)
    line.merge_consecutive_drifts(inplace=True)
    elements = sixtracklib.Elements.from_line(line)
    job = sixtracklib.TrackJob(elements, ps, device=device)

end_setup_time = time.time()
print(f'Setting up time: {(end_setup_time - start_time)/60.}mins')

start_tracking = time.time()
job.track_until(turn_to_track)
job.collect()
end_tracking = time.time()
print(f'Tracking time: {(end_tracking - start_tracking)/60.}mins')

parts = job.output.particles[0]

shape = A1_A2_in_sigma.shape[:2]
# NOTE(review): this dict literal continues past the visible source.
init_dict = {
    'x': init_denormalized_6D[:, 0].reshape(shape),
tc = st.TriCub(cbuffer=lattice.cbuffer)

elem = lattice.Drift(length=0.5)
elem = lattice.LimitRect(xmin=-0.5, xmax=1.5, ymin=-1.5, ymax=0.5)

tc8_index = lattice.cbuffer.n_objects  # Third TriCub element: index 8
tc = st.TriCub(cbuffer=lattice.cbuffer)

# b) the particle set
particle_sets = st.ParticlesSet()
particles = particle_sets.Particles(num_particles=100)

# ------------------------------------------------------------------------------
# 2) Create the track_job; currently only CPU is supported

job = st.TrackJob(lattice, particle_sets)

# ------------------------------------------------------------------------------
# 3) Create the data buffer for the TriCubData instances and hand it over to
#    the track_job for management:

tricub_data_buffer = CBuffer()

tc_data_0_index = tricub_data_buffer.n_objects
tc_data_0 = st.TriCubData(cbuffer=tricub_data_buffer, nx=100, ny=100, nz=100)

tc_data_1_index = tricub_data_buffer.n_objects
tc_data_1 = st.TriCubData(cbuffer=tricub_data_buffer, nx=10, ny=16, nz=8)

# register the data buffer with the job so it can be pushed/pulled by id
tricub_data_buffer_id = job.add_stored_buffer(cbuffer=tricub_data_buffer)
Delta_psi = 0.32 # the peak of the spectrum print('peaked noise at {}'.format(Delta_psi)) # B. Parameters for ksi mean = 0.0 std = 0.02 # the rms width of the noise spectrum psi_t = 0 psi_t_list = [] # list to append the phase of the noise signal # C. create the phase of the noise signal for i in range(0, n_turns): psi_t_list.append(psi_t) ksi = np.random.normal(mean, std) # different seed on each turn psi_t = psi_t + 2 * np.pi * Delta_psi + 2 * np.pi * ksi # D. Construct the noise signal noiseKicks = phi_0 * np.cos(psi_t_list) job = sixtracklib.TrackJob(elements, ps) # if you want to update elements, check job002.. time_cum = 0 for turn in range(1, n_turns + 1): job.push_beam_elements() job.track_until(turn) time_cum += circumference / (ps.particles[0].beta0[0] * pysixtrack.Particles.clight) job.collect_particles() res = ps.particles[0] if dpy_kick: print('dpy_kick') # Uncomment for amplitude noise
def test_kicks(
    cmp_file_name="precomputed_kicks.pickle",
    device_str=None,
    abs_tol=1e-15,
    rel_tol=0.0,
):
    """Track one turn through a single TriCub element and compare the
    resulting kicks (px, py, ptau) against precomputed reference values
    stored in the test-data pickle file.
    """
    ####### load file with correct kicks #######
    path_to_testdir = sttest.config.PATH_TO_TESTDATA_DIR
    assert path_to_testdir is not None
    assert os.path.exists(path_to_testdir)
    assert os.path.isdir(path_to_testdir)

    path_to_cmp_file = os.path.join(path_to_testdir, "tricub", cmp_file_name)
    assert os.path.exists(path_to_cmp_file)

    with open(path_to_cmp_file, "rb") as fp:
        n_part, prng_seed, kicks = pickle.load(fp)

    assert n_part > 0
    assert prng_seed is not None
    assert kicks is not None

    # seed matches the one used to generate the reference data
    np.random.seed(int(prng_seed))

    # lattice with a single unit-length TriCub element
    lattice = st.Elements()
    tc_index = lattice.cbuffer.n_objects
    tc = st.TriCub(cbuffer=lattice.cbuffer)
    tc.length = 1.0

    particles_set = st.ParticlesSet()
    particles = particles_set.Particles(num_particles=n_part)

    # random interpolation table and grid geometry (grid centered on 0)
    nx = 5
    ny = 7
    nz = 9
    A = np.random.rand(nx, ny, nz, 8) * 1.0e-3
    dx = 0.001
    dy = 0.002
    dz = 0.003
    x0 = -(nx // 2) * dx
    y0 = -(ny // 2) * dy
    z0 = -(nz // 2) * dz

    # sample points inside the grid
    test_x = x0 + (nx - 2) * dx * np.random.rand(n_part)
    test_y = y0 + (ny - 2) * dy * np.random.rand(n_part)
    test_z = z0 + (nz - 2) * dz * np.random.rand(n_part)

    for i_part in range(n_part):
        part = pysixtrack.Particles()
        part.x = test_x[i_part]
        part.y = test_y[i_part]
        part.tau = test_z[i_part]
        part.partid = i_part
        part.state = 1
        part.elemid = 0
        part.turn = 0
        particles.from_pysixtrack(part, i_part)

    job = st.TrackJob(lattice, particles_set, device=device_str)

    # TriCubData holding the interpolation table, kept in its own buffer
    tricub_data_buffer = cobjects.CBuffer()
    tc_data_index = tricub_data_buffer.n_objects
    tc_data = st.TriCubData(cbuffer=tricub_data_buffer, nx=nx, ny=ny, nz=nz)

    tc_data.x0 = x0
    tc_data.y0 = y0
    tc_data.z0 = z0
    tc_data.dx = dx
    tc_data.dy = dy
    tc_data.dz = dz
    tc_data.mirror_x = 0
    tc_data.mirror_y = 0
    tc_data.mirror_z = 0

    # table entries are scaled by the grid spacings per derivative order
    scale = [1.0, dx, dy, dz, dx * dy, dx * dz, dy * dz, (dx * dy) * dz]
    for ii in range(nx):
        for jj in range(ny):
            for kk in range(nz):
                for ll in range(8):
                    tc_data.table_addr[ll + 8 * (ii + nx * (jj + ny * kk))] = (
                        A[ii, jj, kk, ll] * scale[ll])

    # hand the data buffer to the job and wire the TriCub to its data
    tricub_data_buffer_id = job.add_stored_buffer(cbuffer=tricub_data_buffer)
    st.TriCub_buffer_create_assign_address_item(job, tc_index,
                                                tricub_data_buffer_id,
                                                tc_data_index)
    job.commit_address_assignments()
    job.assign_all_addresses()

    job.track_until(1)
    job.collect()

    # particles started with zero momenta, so px/py/ptau are the kicks
    assert np.allclose(kicks[:, 0], particles.px, rel_tol, abs_tol)
    assert np.allclose(kicks[:, 1], particles.py, rel_tol, abs_tol)
    assert np.allclose(kicks[:, 2], particles.ptau, rel_tol, abs_tol)
def prepare(npart=int(1e6), p0c=p0c, elements=elements, device='cpu'):
    """Build a track job with `npart` particles on a linear x-offset ramp.

    Defaults pick up the module-level `p0c` and `elements`.
    """
    pset = pystlib.Particles.from_ref(npart, p0c=p0c)
    pset.x += np.linspace(0, 1e-6, npart)
    return pystlib.TrackJob(elements, pset, device=device)
    # NOTE(review): continuation of a call that starts before the visible
    # source (the particle-set constructor).
    p0c_ev=madx.sequence["lhcb1"].beam.pc * 1e9)
particle_set.to_file("here.particleset")

# In[10]:

# Or from dumped file
# particle_set = sixtracklib.ParticlesSet.fromfile("here.particleset")

# In[11]:

sixtracklib.TrackJob.print_nodes("opencl")

# In[ ]:

# For GPU use, specify GPU device_id from information given by clinfo in the cell above
job = sixtracklib.TrackJob(elements, particle_set, device="opencl:0.2")
job.track_until(100)
job.collect()  # transfer data back from GPU, fine to call if CPU only

# In[ ]:

# final coordinates after 100 turns, gathered from the output buffer
final_output = {
    "x": job.output.particles[0].x,
    "px": job.output.particles[0].px,
    "y": job.output.particles[0].y,
    "py": job.output.particles[0].py,
    "zeta": job.output.particles[0].zeta,
    "delta": job.output.particles[0].delta,
    "at_turn": job.output.particles[0].at_turn,
}
path_to_eb = args.beam_elements_buffer
beam_elements = pyst.Elements.fromfile(path_to_eb)

# append output monitors according to the command-line options
num_beam_monitors = pyst.append_beam_monitors_to_lattice(
    beam_elements.cbuffer, args.until_turn_elem_by_elem,
    args.until_turn_turn_by_turn, args.until_turn_output,
    args.skip_out_turns)

print("Added {0} beam monitors to the lattice".format(num_beam_monitors))

# =======================================================================
# Create the TrackJob instance

job = pyst.TrackJob(beam_elements.cbuffer, particles_set.cbuffer,
                    until_turn_elem_by_elem=args.until_turn_elem_by_elem,
                    arch=args.architecture,
                    device_id=args.device_id)

# ========================================================================
# Print summary about configuration

print("TrackJob:")
print("----------------------------------------------------------------")
print("architecture : {0}".format(job.arch_str))

if job.has_elem_by_elem_output:
    print("has elem_by_elem output : yes")
else:
    print("has elem_by_elem output : no")
lattice.BeamMonitor()

# freshly created monitors have no output buffer assigned yet
assert lattice.cbuffer.get_object(bm0_index).out_address == 0
assert lattice.cbuffer.get_object(bm1_index).out_address == 0
assert lattice.cbuffer.get_object(bm2_index).out_address == 0

pset = st.ParticlesSet()
pset.Particles(num_particles=100)

# user-managed output buffer with two particle sets of different size
output_buffer = st.ParticlesSet()
out_buffer0_index = output_buffer.cbuffer.n_objects
output_buffer.Particles(num_particles=100)
out_buffer1_index = output_buffer.cbuffer.n_objects
output_buffer.Particles(num_particles=512)

job = st.TrackJob(lattice, pset)

# hand the output_buffer over to the track job:
output_buffer_id = job.add_stored_buffer(cbuffer=output_buffer)
assert output_buffer_id != st_ARCH_ILLEGAL_BUFFER_ID.value

# use the predefined lattice_buffer_id value to refer to the
# beam elements buffer
lattice_buffer_id = st_ARCH_BEAM_ELEMENTS_BUFFER_ID.value

# use the _type_id attributes of beam monitors and particle sets to
# refer to these object types:
particle_set_type_id = output_buffer.cbuffer.get_object(
    out_buffer0_index)._typeid
beam_monitor_type_id = lattice.cbuffer.get_object(bm0_index)._typeid
# snapshot of the beam-elements buffer layout before job creation
eb_data_begin = eb.base
eb_data_size = eb.size
eb_num_objects = eb.n_objects

pb = CBuffer.fromfile(path_to_particle_data)
assert (pb.n_objects > 0)
particle_type_id = pb.get_object_typeid(0)

pb_data_begin = pb.base
pb_data_size = pb.size
pb_num_objects = pb.n_objects

# Testcase 1: only default parameters, no elem by elem output, no
# beam monitors
job = pyst.TrackJob(eb, pb)

assert (job.arch_str == 'cpu')
assert (job.particles_buffer == pb)
assert (job.beam_elements_buffer == eb)
assert (not job.has_output_buffer)
assert (job.output_buffer is None)
assert (not job.has_elem_by_elem_output)
assert (not job.has_beam_monitor_output)
assert (job.num_beam_monitors == 0)
assert (job.elem_by_elem_output_offset == 0)
assert (job.beam_monitor_output_offset == 0)

del job
job = None
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import numpy as np import sixtracklib as pyst elements = pyst.Elements() elements.Drift(length=1.2) elements.Multipole(knl=[0, 0.001]) particles = pyst.Particles.from_ref(num_particles=10, p0c=1e9) particles.px += np.linspace(0, 1e-2, 10) job = pyst.TrackJob(elements, particles) status = job.track_until(1) print(particles.x) print(particles.px) if pyst.supports('opencl'): jobcl = pyst.TrackJob(elements, particles, device="opencl:0.0") status = jobcl.track_until(2) jobcl.collect() print(particles.x) print(particles.px)
def full_track_particles(radiuses, alpha, theta1, theta2, n_turns,
                         opencl=True):
    """Complete tracking of particles for the given number of turns

    Parameters
    ----------
    radiuses : ndarray
        initial conditions
    alpha : ndarray
        initial conditions
    theta1 : ndarray
        initial conditions
    theta2 : ndarray
        initial conditions
    n_turns : unsigned int
        number of turns to perform
    opencl : bool (optional)
        use opencl backend (default: True)

    Returns
    -------
    tuple
        (r, alpha, theta1, theta2), shape = (initial conditions, n turns)
    """
    # polar -> cartesian -> physical coordinates
    x, px, y, py = polar_to_cartesian(radiuses, alpha, theta1, theta2)
    x, px, y, py = convert_norm_to_physical(x, px, y, py)

    particles = st.Particles.from_ref(num_particles=len(x), p0c=6.5e12)
    particles.x += x
    particles.px += px
    particles.y += y
    particles.py += py

    lattice = st.Elements.fromfile(
        os.path.join(os.path.dirname(__file__), 'data/beam_elements.bin'))

    if opencl:
        cl_job = st.TrackJob(lattice, particles, device="opencl:0.0")
    else:
        cl_job = st.TrackJob(lattice, particles)

    data_r = np.empty((len(x), n_turns))
    data_a = np.empty((len(x), n_turns))
    data_th1 = np.empty((len(x), n_turns))
    data_th2 = np.empty((len(x), n_turns))

    # NOTE(review): the first iteration calls track_until(0), so column 0
    # effectively holds the initial conditions -- confirm this is intended.
    for i in range(n_turns):
        status = cl_job.track_until(i)
        cl_job.collect_particles()
        # print(particles.at_turn)
        t_x, t_px, t_y, t_py = convert_physical_to_norm(
            particles.x, particles.px, particles.y, particles.py)
        data_r[:, i], data_a[:, i], data_th1[:, i], data_th2[:, i] = cartesian_to_polar(
            t_x, t_px, t_y, t_py)
    return data_r, data_a, data_th1, data_th2
"""Inspect the OpenCL kernel configuration of a SixTrackLib track job."""
import sixtracklib as st
from sixtracklib.stcommon import st_ClContext_uses_optimized_tracking
from sixtracklib.stcommon import st_ClContextBase_is_debug_mode_enabled
from sixtracklib.stcommon import st_ARCH_ILLEGAL_KERNEL_ID

if __name__ == '__main__':
    # minimal lattice / particle set: only the kernels are inspected
    lattice = st.Elements()
    drift = lattice.Drift(length=1.0)
    pset = st.Particles(num_particles=1000)

    job = st.TrackJob(lattice, pset, device="opencl:0.0")
    ctrl = job.controller

    assert not st_ClContextBase_is_debug_mode_enabled(ctrl.pointer)
    assert st_ClContext_uses_optimized_tracking(ctrl.pointer)

    k_id = ctrl.find_kernel_by_name(
        "st_Track_particles_until_turn_opt_pp_opencl")
    assert k_id != st_ARCH_ILLEGAL_KERNEL_ID.value

    # NOTE(review): f-string text reconstructed from a collapsed source;
    # the stray fourth quote after f""" is present in the original.
    print(f""""
    current workgroup size (0 == max) : {ctrl.kernel_workgroup_size( k_id )}
    max workgroup size : {ctrl.kernel_max_workgroup_size(k_id )}
    preferred workgroup size multiple : {ctrl.kernel_preferred_workgroup_size_multiple(k_id)}
    """)

    prog_id = ctrl.program_id_by_kernel_id(k_id)
    used_compile_options = ctrl.program_compile_options(prog_id)
    prog_compile_report = ctrl.program_compile_report(prog_id)
partset = pyst.ParticlesSet()
particles = partset.Particles(num_particles=100)
# linear px ramp: 0, 0.01, 0.02, ...
particles.px += range(len(particles.px))
particles.px *= 1e-2

# Print enabled architectures; pass any of these values as arch=
# to the construction of the track job; default == cpu
print("enabled archs: {0}".format(', '.join(
    pyst.TrackJob.enabled_archs())))

# =========================================================================
# CPU based Track Job:

job = pyst.TrackJob(elements.cbuffer, partset.cbuffer)

# Track until every particle is at the begin of turn 5:
status = job.track_until(5)  # status should be 0 if success, otherwise < 0

# Track until every particle is at the begin of turn 10:
status = job.track_until(
    10)  # status should be 0 if success, otherwise < 0

# prepare the particles buffer for read-out:
job.collect()

# Track next turn using track_line in two steps:
status = job.track_line(0, 1)  # Track over the drift, status should be 0
status = job.track_line(1, 2, finish_turn=True)  # finish tracking line
# release the C-level buffers used by the reference tracking above
st_Buffer_delete(ptr_belem_buffer)
st_ElemByElemConfig_delete(elem_by_elem_config)
ptr_belem_buffer = st_NullBuffer
elem_by_elem_config = st_NullElemByElemConfig

print("cmp_output_buffer tracking finished")

# -------------------------------------------------------------------------
# track a fresh copy of the initial particles on the OpenCL backend
track_pb = CBuffer()
track_particles = st.makeCopy(initial_particles, cbuffer=track_pb)

arch = "opencl"
device = "2.0"

job = st.TrackJob(eb, track_pb, until_turn_elem_by_elem, arch, device)
print("job setup complete")

assert job.arch_str == 'opencl'
assert job.has_output_buffer
assert job.num_beam_monitors > 0
assert job.has_elem_by_elem_output
assert job.has_beam_monitor_output

job.track_elem_by_elem(until_turn_elem_by_elem)
assert job.last_track_status == st_TRACK_SUCCESS.value
print("elem by elem tracking finished")

job.track_until(until_turn)
assert job.last_track_status == st_TRACK_SUCCESS.value
# Build elements for SixTrackLib elements = pystlib.Elements.from_line( pysixtrack.Line.from_madx_sequence(mad.sequence.sps)[0]) nturns = 2**14 ps_line, _ = pysixtrack.Line.from_madx_sequence(mad.sequence.sps) elements = pystlib.Elements() elements.append_line(ps_line) bpm = elements.BeamMonitor(num_stores=nturns) # Track one turn npart = 10 particles = pystlib.Particles.from_ref(npart, p0c=26e6) particles.x += np.linspace(0, 1e-6, npart) job = pystlib.TrackJob(elements, particles, until_turn_elem_by_elem=1) job.track_elem_by_elem(1) pl.plot(job.output.particles[0].x[1::10]) # Track many turns CPU npart = 10 particles = pystlib.Particles.from_ref(npart, p0c=26e6) particles.x += np.linspace(0, 1e-6, npart) job = pystlib.TrackJob(elements, particles) job.track_until(nturns) # Find tunes ff = np.linspace(0, 0.5, nturns // 2 + 1) x = job.output.particles[0].x[1::npart] xf = abs(np.fft.rfft(x)) pl.plot(ff, xf)
def track_particle_sixtracklib(
        line, partCO, Dx_wrt_CO_m, Dpx_wrt_CO_rad,
        Dy_wrt_CO_m, Dpy_wrt_CO_rad,
        Dsigma_wrt_CO_m, Ddelta_wrt_CO, n_turns, device=None):
    """Track particles displaced from the closed orbit with SixTrackLib.

    Returns the turn-by-turn coordinates x, px, y, py, sigma, delta, each
    as an array of shape (n_turns, n_part).
    """
    # broadcast scalar displacements to arrays of equal length
    Dx_wrt_CO_m, Dpx_wrt_CO_rad,\
        Dy_wrt_CO_m, Dpy_wrt_CO_rad,\
        Dsigma_wrt_CO_m, Ddelta_wrt_CO = vectorize_all_coords(
            Dx_wrt_CO_m, Dpx_wrt_CO_rad,
            Dy_wrt_CO_m, Dpy_wrt_CO_rad,
            Dsigma_wrt_CO_m, Ddelta_wrt_CO)

    import sixtracklib
    elements = sixtracklib.Elements()
    # turn-by-turn monitor placed before the line
    elements.BeamMonitor(num_stores=n_turns)
    elements.append_line(line)

    n_part = len(Dx_wrt_CO_m)

    # Build PyST particle
    ps = sixtracklib.ParticlesSet()
    p = ps.Particles(num_particles=n_part)

    for i_part in range(n_part):
        # closed orbit plus the requested displacement
        part = partCO.copy()
        part.x += Dx_wrt_CO_m[i_part]
        part.px += Dpx_wrt_CO_rad[i_part]
        part.y += Dy_wrt_CO_m[i_part]
        part.py += Dpy_wrt_CO_rad[i_part]
        part.sigma += Dsigma_wrt_CO_m[i_part]
        part.delta += Ddelta_wrt_CO[i_part]
        part.partid = i_part
        part.state = 1
        part.elemid = 0
        part.turn = 0
        p.from_pysixtrack(part, i_part)

    if device is None:
        job = sixtracklib.TrackJob(elements, ps)
    else:
        job = sixtracklib.TrackJob(elements, ps, device=device)

    job.track_until(n_turns)
    job.collect()

    res = job.output

    # the monitor buffer is flat; reshape to (turn, particle)
    x_tbt = res.particles[0].x.reshape(n_turns, n_part)
    px_tbt = res.particles[0].px.reshape(n_turns, n_part)
    y_tbt = res.particles[0].y.reshape(n_turns, n_part)
    py_tbt = res.particles[0].py.reshape(n_turns, n_part)
    sigma_tbt = res.particles[0].sigma.reshape(n_turns, n_part)
    delta_tbt = res.particles[0].delta.reshape(n_turns, n_part)

    # For now data are saved at the end of the turn by STlib and at the
    # beginning by the others
    #x_tbt[1:, :] = x_tbt[:-1, :]
    #px_tbt[1:, :] = px_tbt[:-1, :]
    #y_tbt[1:, :] = y_tbt[:-1, :]
    #py_tbt[1:, :] = py_tbt[:-1, :]
    #sigma_tbt[1:, :] = sigma_tbt[:-1, :]
    #delta_tbt[1:, :] = delta_tbt[:-1, :]
    #x_tbt[0, :] = p.x
    #px_tbt[0, :] = p.px
    #y_tbt[0, :] = p.y
    #py_tbt[0, :] = p.py
    #sigma_tbt[0, :] = p.sigma
    #delta_tbt[0, :] = p.delta

    print('Done loading!')

    return x_tbt, px_tbt, y_tbt, py_tbt, sigma_tbt, delta_tbt
# print(x,y,z) # TI.val(x,y,z) for i_part in range(n_part): part = pysixtrack.Particles() part.x = test_x[i_part] part.y = test_y[i_part] part.tau = test_z[i_part] part.partid = i_part part.state = 1 part.elemid = 0 part.turn = 0 particles.from_pysixtrack(part, i_part) job = st.TrackJob(lattice, particles_set, device=device) tricub_data_buffer = cobjects.CBuffer() tc_data_index = tricub_data_buffer.n_objects tc_data = st.TriCubData(cbuffer=tricub_data_buffer, nx=nx, ny=ny, nz=nz) tc_data.x0 = x0 tc_data.y0 = y0 tc_data.z0 = z0 tc_data.dx = dx tc_data.dy = dy tc_data.dz = dz tc_data.mirror_x = 0 tc_data.mirror_y = 0 tc_data.mirror_z = 0 scale = [1., dx, dy, dz, dx * dy, dx * dz, dy * dz, (dx * dy) * dz]
q0 = lattice.Multipole(knl=[0.0, 0.1])

sc1_index = lattice.cbuffer.n_objects  # index of sc1 element
sc1 = lattice.SCInterpolatedProfile(
    number_of_particles=particles.num_particles)

dr1 = lattice.Drift(length=1.0)
q1 = lattice.Multipole(knl=[0.0, -0.1])

sc2_index = lattice.cbuffer.n_objects  # index of sc2 element
sc2 = lattice.SCInterpolatedProfile(
    number_of_particles=particles.num_particles)

# --------------------------------------------------------------------------
# D) Create the track-job

# Create the track-job
job = st.TrackJob(lattice, beam)
# job = st.TrackJob(lattice, beam, device="opencl:1.0")
# job = st.CudaTrackJob(lattice, beam)

# --------------------------------------------------------------------------
# E) Add the interpol_buffer to the track-job. This allows the track job
#    to push/pull this buffer like the other buffers via the returned id

interpol_buffer_id = job.add_stored_buffer(cbuffer=interpol_buffer)
print(f"interpol_buffer_id = {interpol_buffer_id}")

# --------------------------------------------------------------------------
# F) Create the assignments of the line profile datasets to the space
#    charge elements. Instead of doing it using the track-job API directly,
#    we use a convenience function which hides all the gritty details
def track_particle_sixtracklib_firstlast(
        line, partCO, Dx_wrt_CO_m, Dpx_wrt_CO_rad,
        Dy_wrt_CO_m, Dpy_wrt_CO_rad,
        Dsigma_wrt_CO_m, Ddelta_wrt_CO, n_turns, device=None):
    """Track displaced particles with SixTrackLib, recording the first
    1000 turns and (via a rolling monitor) the last 1000 turns.

    Returns a dict with the turn-by-turn coordinates of both windows plus
    tracking/collecting timing information in minutes.
    """
    # broadcast scalar displacements to arrays of equal length
    Dx_wrt_CO_m, Dpx_wrt_CO_rad,\
        Dy_wrt_CO_m, Dpy_wrt_CO_rad,\
        Dsigma_wrt_CO_m, Ddelta_wrt_CO = vectorize_all_coords(
            Dx_wrt_CO_m, Dpx_wrt_CO_rad,
            Dy_wrt_CO_m, Dpy_wrt_CO_rad,
            Dsigma_wrt_CO_m, Ddelta_wrt_CO)

    #if type(partCO) is pysixtrack.Particles:
    #    part = partCO.copy()
    #else:
    #    part = pysixtrack.Particles(**partCO)

    n_turns_to_store = 1000
    n_turns_tbt = 1000
    #skip_turns=1000

    import sixtracklib
    elements = sixtracklib.Elements()
    #sixtracklib.append_beam_monitors_to_lattice(beam_elements_buffer=elements.cbuffer,
    #                                            until_turn_elem_by_elem=0,
    #                                            until_turn_turn_by_turn=n_turns_tbt,
    #                                            until_turn=n_turns,
    #                                            skip_turns=skip_turns
    #                                            )
    # monitor 0: the first n_turns_tbt turns; monitor 1: rolling buffer
    # that ends up holding the last n_turns_to_store turns
    elements.BeamMonitor(num_stores=n_turns_tbt, start=0, skip=1,
                         is_rolling=False)
    elements.BeamMonitor(num_stores=n_turns_to_store, start=0, skip=1,
                         is_rolling=True)
    print(elements.get_elements())
    #elements.BeamMonitor(num_stores=n_turns)
    #elements.BeamMonitor(num_stores=n_turns_to_store)
    elements.append_line(line)
    n_stores = elements.get_elements()[1].num_stores

    n_part = len(Dx_wrt_CO_m)

    # Build PyST particle
    ps = sixtracklib.ParticlesSet()
    p = ps.Particles(num_particles=n_part)

    for i_part in range(n_part):
        # closed orbit plus the requested displacement
        if type(partCO) is pysixtrack.Particles:
            part = partCO.copy()
        else:
            part = pysixtrack.Particles(**partCO)
        part.x += Dx_wrt_CO_m[i_part]
        part.px += Dpx_wrt_CO_rad[i_part]
        part.y += Dy_wrt_CO_m[i_part]
        part.py += Dpy_wrt_CO_rad[i_part]
        part.sigma += Dsigma_wrt_CO_m[i_part]
        part.delta += Ddelta_wrt_CO[i_part]
        part.partid = i_part
        part.state = 1
        part.elemid = 0
        part.turn = 0
        p.from_pysixtrack(part, i_part)

    if device is None:
        job = sixtracklib.TrackJob(elements, ps)
    else:
        job = sixtracklib.TrackJob(elements, ps, device=device)

    start_tracking_time = time.time()
    job.track(n_turns)
    end_tracking_time = time.time()
    job.collect()
    end_collecting_time = time.time()

    res = job.output
    print(res.particles[0])
    print(res.particles[1])

    # first window (monitor 0), reshaped from flat to (turn, particle)
    x_tbt_first = res.particles[0].x.reshape(n_turns_tbt, n_part)
    px_tbt_first = res.particles[0].px.reshape(n_turns_tbt, n_part)
    y_tbt_first = res.particles[0].y.reshape(n_turns_tbt, n_part)
    py_tbt_first = res.particles[0].py.reshape(n_turns_tbt, n_part)
    zeta_tbt_first = res.particles[0].zeta.reshape(n_turns_tbt, n_part)
    delta_tbt_first = res.particles[0].delta.reshape(n_turns_tbt, n_part)
    at_turn_tbt_first = res.particles[0].at_turn.reshape(n_turns_tbt, n_part)
    state_tbt_first = res.particles[0].state.reshape(n_turns_tbt, n_part)

    # last window (monitor 1, rolling)
    x_tbt_last = res.particles[1].x.reshape(n_stores, n_part)
    px_tbt_last = res.particles[1].px.reshape(n_stores, n_part)
    y_tbt_last = res.particles[1].y.reshape(n_stores, n_part)
    py_tbt_last = res.particles[1].py.reshape(n_stores, n_part)
    zeta_tbt_last = res.particles[1].zeta.reshape(n_stores, n_part)
    delta_tbt_last = res.particles[1].delta.reshape(n_stores, n_part)
    at_turn_tbt_last = res.particles[1].at_turn.reshape(n_stores, n_part)
    state_tbt_last = res.particles[1].state.reshape(n_stores, n_part)

    output_dict = {'x_tbt_first': x_tbt_first,
                   'px_tbt_first': px_tbt_first,
                   'y_tbt_first': y_tbt_first,
                   'py_tbt_first': py_tbt_first,
                   'zeta_tbt_first': zeta_tbt_first,
                   'delta_tbt_first': delta_tbt_first,
                   'at_turn_tbt_first': at_turn_tbt_first,
                   # 'state_tbt_first' : state_tbt_first,
                   'x_tbt_last': x_tbt_last,
                   'px_tbt_last': px_tbt_last,
                   'y_tbt_last': y_tbt_last,
                   'py_tbt_last': py_tbt_last,
                   'zeta_tbt_last': zeta_tbt_last,
                   'delta_tbt_last': delta_tbt_last,
                   'at_turn_tbt_last': at_turn_tbt_last,
                   # 'state_tbt_last' : state_tbt_last,
                   'tracking_time_mins': (end_tracking_time - start_tracking_time) / 60.,
                   'collecting_time_mins': (end_collecting_time - end_tracking_time) / 60.,
                   }

    print('Done loading!')

    return output_dict
lattice.BeamMonitor()

# freshly created monitors have no output buffer assigned yet
assert lattice.cbuffer.get_object(bm0_index).out_address == 0
assert lattice.cbuffer.get_object(bm1_index).out_address == 0
assert lattice.cbuffer.get_object(bm2_index).out_address == 0

pset = st.ParticlesSet()
pset.Particles(num_particles=100)

# user-managed output buffer with two particle sets of different size
output_buffer = st.ParticlesSet()
out_buffer0_index = output_buffer.cbuffer.n_objects
output_buffer.Particles(num_particles=100)
out_buffer1_index = output_buffer.cbuffer.n_objects
output_buffer.Particles(num_particles=512)

# job pinned to a specific device node
job = st.TrackJob(lattice, pset, device=node_id_strs[0])
controller = job.controller
assert controller.has_selected_node
assert controller.selected_node_id_str == node_id_strs[0]

# hand the output_buffer over to the track job:
output_buffer_id = job.add_stored_buffer(cbuffer=output_buffer)
assert output_buffer_id != st_ARCH_ILLEGAL_BUFFER_ID.value

# use the predefined lattice_buffer_id value to refer to the
# beam elements buffer
lattice_buffer_id = st_ARCH_BEAM_ELEMENTS_BUFFER_ID.value

# use the _type_id attributes of beam monitors and particle sets to
# refer to these object types:
    # NOTE(review): continuation of a BeamMonitor-style call that starts
    # before the visible source.
    skip=5,
    out_address=0,
    max_particle_id=0,
    min_particle_id=0,
    is_rolling=True,
    is_turn_ordered=False)

NUM_PARTICLES = 100
pb = pyst.ParticlesSet()
pb.Particles(num_particles=100)

ptr_pb = st_Buffer_new_mapped_on_cbuffer(pb.cbuffer)
ptr_particles = st_Particles_cbuffer_get_particles(pb.cbuffer, 0)
assert ptr_particles != st_NullParticles

# OpenCL track job on device "0.0" with no elem-by-elem output
job = pyst.TrackJob(line, pb, 0, "opencl", "0.0")
assert job.type_str() == 'opencl'
assert job.requires_collecting
assert job.has_output_buffer
assert not job.has_elem_by_elem_output
assert job.has_beam_monitor_output
assert job.num_beam_monitors == 2

# Copy the original contents of the line, the particle buffer and the out-
# put buffer to a set of different buffers -> so we can keep track of them
ptr_pb = st_Buffer_new_mapped_on_cbuffer(pb.cbuffer)
assert ptr_pb != st_NullBuffer

ptr_line = st_Buffer_new_mapped_on_cbuffer(line.cbuffer)
assert ptr_line != st_NullBuffer