def init_all(self):
    self.n_slices = n_slices
    self.n_segments = n_segments

    # define the machine
    from LHC_custom import LHC
    self.machine = LHC(n_segments=n_segments, machine_configuration=machine_configuration)

    # define MP size
    nel_mp_ref_0 = init_unif_edens*4*x_aper*y_aper/N_MP_ele_init

    # prepare e-cloud
    import PyECLOUD.PyEC4PyHT as PyEC4PyHT
    ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
                              L_ecloud=self.machine.circumference/n_segments, slicer=None,
                              Dt_ref=Dt_ref, pyecl_input_folder=pyecl_input_folder,
                              chamb_type=chamb_type,
                              x_aper=x_aper, y_aper=y_aper,
                              filename_chm=filename_chm, Dh_sc=Dh_sc,
                              init_unif_edens_flag=init_unif_edens_flag,
                              init_unif_edens=init_unif_edens,
                              N_mp_max=N_mp_max,
                              nel_mp_ref_0=nel_mp_ref_0,
                              B_multip=B_multip_per_eV*self.machine.p0/e*c)

    # setup transverse losses (to "protect" the ecloud)
    import PyHEADTAIL.aperture.aperture as aperture
    apt_xy = aperture.EllipticalApertureXY(x_aper=ecloud.impact_man.chamb.x_aper,
                                           y_aper=ecloud.impact_man.chamb.y_aper)
    self.machine.one_turn_map.append(apt_xy)
    n_non_parallelizable = 2  # RF and aperture

    # We assume that all the objects that cannot be slice-parallelized are at the end of the ring
    i_end_parallel = len(self.machine.one_turn_map) - n_non_parallelizable

    # split the machine
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
    myid = self.ring_of_CPUs.myid
    i_start_part, i_end_part = sharing.my_part(myid)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
    if self.ring_of_CPUs.I_am_a_worker:
        print('I am id=%d/%d (worker) and my part is %d long' % (myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
    elif self.ring_of_CPUs.I_am_the_master:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
        print('I am id=%d/%d (master) and my part is %d long' % (myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

    # install eclouds in my part
    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            # ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
            ecloud_new = DummyEcloud()
            my_new_part.append(ecloud_new)
            self.my_list_eclouds.append(ecloud_new)
    self.mypart = my_new_part
y_aper = 20 * sigma_y
Dh_sc = 2 * x_aper / 128

# define MP size
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

# define an electron cloud
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
from PyHEADTAIL.particles.slicing import UniformBinSlicer
slicer = UniformBinSlicer(n_slices=64, n_sigma_z=2.)
ecloud = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / n_segments, slicer=slicer,
                          Dt_ref=25e-12, pyecl_input_folder='./drift_sim',
                          x_aper=x_aper, y_aper=y_aper, Dh_sc=Dh_sc,
                          init_unif_edens_flag=init_unif_edens_flag,
                          init_unif_edens=init_unif_edens,
                          N_mp_max=N_mp_max,
                          nel_mp_ref_0=nel_mp_ref_0)

# install ecloud in the machine
machine.install_after_each_transverse_segment(ecloud)

# setup transverse losses (to "protect" the ecloud)
import PyHEADTAIL.aperture.aperture as aperture
apt_xy = aperture.EllipticalApertureXY(x_aper=ecloud.cloudsim.chamb.x_aper,
                                       y_aper=ecloud.cloudsim.chamb.y_aper)
machine.one_turn_map.append(apt_xy)
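# --- Usage sketch (not part of the original script, assumptions noted): once the
# --- e-cloud kicks and the protective aperture are installed, a bunch can be
# --- generated and tracked turn by turn. machine.generate_6D_Gaussian_bunch and
# --- machine.track are the standard PyHEADTAIL calls used elsewhere in these
# --- snippets; the beam parameters and number of turns below are illustrative only.
bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=300000, intensity=1.15e11,
                                           epsn_x=2.5e-6, epsn_y=2.5e-6, sigma_z=0.2)
mean_x_turns = []
for i_turn in range(128):
    machine.track(bunch)                 # applies the full one-turn map, e-cloud kicks included
    mean_x_turns.append(bunch.mean_x())  # record the horizontal centroid to watch for an instability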
list_slices += these_slices

# Build e-cloud
print('Build ecloud...')
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
ecloud = PyEC4PyHT.Ecloud(
    L_ecloud=1., slicer=None, slice_by_slice_mode=True,
    Dt_ref=5e-12, pyecl_input_folder='./pyecloud_config',
    chamb_type='polyg', filename_chm='LHC_chm_ver.mat',
    #init_unif_edens_flag=1,
    #init_unif_edens=1e7,
    #N_mp_max = 3000000,
    #nel_mp_ref_0 = 1e7/(0.7*3000000),
    #B_multip = [0.],
    #~ PyPICmode = 'ShortleyWeller_WithTelescopicGrids',
    #~ f_telescope = 0.3,
    target_grid={'x_min_target': -5 * list_bunches[-1].sigma_x(),
                 'x_max_target': 5 * list_bunches[-1].sigma_x(),
                 'y_min_target': -5 * list_bunches[-1].sigma_y(),
                 'y_max_target': 5 * list_bunches[-1].sigma_y(),
                 'Dh_target': .2 * list_bunches[-1].sigma_x()},
    #~ N_nodes_discard = 10.,
    #~ N_min_Dh_main = 10,
    #x_beam_offset = x_beam_offset,
    #y_beam_offset = y_beam_offset,
    #probes_position = probes_position,
    save_pyecl_outp_as='test_saving',
    sparse_solver='PyKLU')
print('Done.')

# REMEMBER TO START POPPING FROM THE RIGHT SIDE
print('Start cloud sim')
# initialize ion cloud with single kick per bunch
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
ecloud_sk = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / n_segments,
                             slicer=bunch_slicer, Dt_ref=Dt_ref,
                             pyecl_input_folder='./pyecloud_config',
                             beam_monitor=beam_monitor,
                             chamb_type=chamb_type,
                             PyPICmode='FFT_OpenBoundary',
                             x_aper=x_aper, y_aper=y_aper,
                             filename_chm=filename_chm, Dh_sc=Dh_sc,
                             init_unif_edens_flag=init_unif_edens_flag,
                             init_unif_edens=init_unif_edens,
                             cloud_mass=ion_mass, cloud_charge=ion_charge,
                             gas_ion_flag=gas_ion_flag, unif_frac=unif_frac,
                             P_nTorr=P_nTorr, sigma_ion_MBarn=sigma_ion_MBarn,
                             Temp_K=Temp_K, E_init_ion=E_init_ion,
                             N_mp_max=N_mp_max, nel_mp_ref_0=nel_mp_ref_0,
                             B_multip=B_multip_per_eV * machine.p0 / e * c,
                             switch_model='perfect_absorber',
                             kick_mode_for_beam_field=True,
                             verbose=True)

# print grid size
pp.N_mp_max_dip = 500000
nel_mp_ref_0 = pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper / pp.N_MP_ele_init_dip

ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
                          L_ecloud=1., slicer=None,
                          force_interp_at_substeps_interacting_slices=True,
                          Dt_ref=pp.Dt_ref,
                          pyecl_input_folder=pp.pyecl_input_folder,
                          chamb_type=pp.chamb_type,
                          x_aper=pp.x_aper, y_aper=pp.y_aper,
                          filename_chm=pp.filename_chm,
                          PyPICmode=pp.PyPICmode,
                          Dh_sc=pp.Dh_sc_ext,
                          N_min_Dh_main=pp.N_min_Dh_main,
                          f_telescope=pp.f_telescope,
                          N_nodes_discard=pp.N_nodes_discard,
                          target_grid=target_grid_arcs,
                          init_unif_edens_flag=pp.init_unif_edens_flag_dip,
                          init_unif_edens=pp.init_unif_edens_dip,
                          N_mp_max=pp.N_mp_max_dip,
                          nel_mp_ref_0=nel_mp_ref_0,
                          B_multip=[0.],
                          enable_kick_x=pp.enable_kick_x,
                          enable_kick_y=pp.enable_kick_y,
                          kick_mode_for_beam_field=False)

sc = ecloud.beam_PyPIC_state.pic_internal
i_y0 = np.argmin(np.abs(sc.yg))
def init_all(self):
    print('Exec init...')

    from LHC_custom import LHC
    self.machine = LHC(n_segments=n_segments,
                       machine_configuration=machine_configuration,
                       Qp_x=Qp_x, Qp_y=Qp_y,
                       octupole_knob=octupole_knob)
    self.n_non_parallelizable = 1  # RF

    inj_optics = self.machine.transverse_map.get_injection_optics()
    sigma_x_smooth = np.sqrt(inj_optics['beta_x']*epsn_x/self.machine.betagamma)
    sigma_y_smooth = np.sqrt(inj_optics['beta_y']*epsn_y/self.machine.betagamma)

    if flag_aperture:
        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(x_aper=target_size_internal_grid_sigma*sigma_x_smooth,
                                               y_aper=target_size_internal_grid_sigma*sigma_x_smooth)
        self.machine.one_turn_map.append(apt_xy)
        self.n_non_parallelizable += 1

    if enable_transverse_damper:
        # setup transverse damper
        from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
        damper = TransverseDamper(dampingrate_x=dampingrate_x, dampingrate_y=dampingrate_y)
        self.machine.one_turn_map.append(damper)
        self.n_non_parallelizable += 1

    if enable_ecloud:
        print('Build ecloud...')
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(
            L_ecloud=L_ecloud_tot/n_segments, slicer=None, slice_by_slice_mode=True,
            Dt_ref=5e-12, pyecl_input_folder='./pyecloud_config',
            chamb_type='polyg', filename_chm='LHC_chm_ver.mat',
            #init_unif_edens_flag=1,
            #init_unif_edens=1e7,
            #N_mp_max = 3000000,
            #nel_mp_ref_0 = 1e7/(0.7*3000000),
            #B_multip = [0.],
            #~ PyPICmode = 'ShortleyWeller_WithTelescopicGrids',
            #~ f_telescope = 0.3,
            target_grid={'x_min_target': -target_size_internal_grid_sigma*sigma_x_smooth,
                         'x_max_target': target_size_internal_grid_sigma*sigma_x_smooth,
                         'y_min_target': -target_size_internal_grid_sigma*sigma_y_smooth,
                         'y_max_target': target_size_internal_grid_sigma*sigma_y_smooth,
                         'Dh_target': .2*sigma_x_smooth},
            #~ N_nodes_discard = 10.,
            #~ N_min_Dh_main = 10,
            #x_beam_offset = x_beam_offset,
            #y_beam_offset = y_beam_offset,
            #probes_position = probes_position,
            save_pyecl_outp_as='cloud_evol_ring%d' % self.ring_of_CPUs.myring,
            save_only=['lam_t_array', 'nel_hist', 'Nel_timep', 't', 't_hist', 'xg_hist'],
            sparse_solver='PyKLU',
            enable_kick_x=enable_kick_x,
            enable_kick_y=enable_kick_y)
        print('Done.')

    # split the machine
    i_end_parallel = len(self.machine.one_turn_map) - self.n_non_parallelizable
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes_per_ring)
    i_start_part, i_end_part = sharing.my_part(self.ring_of_CPUs.myid_in_ring)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

    if self.ring_of_CPUs.I_am_at_end_ring:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]

    # install eclouds in my part
    if enable_ecloud:
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                # we save buildup info only for the first cloud in each ring
                if self.ring_of_CPUs.myid_in_ring > 0 or len(self.my_list_eclouds) > 0:
                    ecloud_new.remove_savers()
                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
            my_new_part.append(ele)
        self.mypart = my_new_part
        print('Hello, I am %d.%d, my part looks like: %s. Saver status: %s' % (
            self.ring_of_CPUs.myring, self.ring_of_CPUs.myid_in_ring, self.mypart,
            [(ec.cloudsim.cloud_list[0].pyeclsaver is not None) for ec in self.my_list_eclouds]))
init_unif_edens_flag = 1
init_unif_edens = 1e11
N_MP_ele_init = 100000
N_mp_max = N_MP_ele_init * 4.
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

Dh_ext = 2e-3

ecloud = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / machine.transverse_map.n_segments,
                          slicer=slicer, Dt_ref=25e-12,
                          pyecl_input_folder='./drift_sim',
                          x_aper=x_aper, y_aper=y_aper,
                          init_unif_edens_flag=init_unif_edens_flag,
                          init_unif_edens=init_unif_edens,
                          N_mp_max=N_mp_max,
                          nel_mp_ref_0=nel_mp_ref_0,
                          B_multip=B_multip,
                          PyPICmode='ShortleyWeller_WithTelescopicGrids',
                          f_telescope=0.3,
                          Dh_sc=Dh_ext,
                          target_grid={'x_min_target': -4 * bunch.sigma_x(),
                                       'x_max_target': 4 * bunch.sigma_x(),
                                       'y_min_target': -4 * bunch.sigma_y(),
                                       'y_max_target': 4 * bunch.sigma_y(),
                                       'Dh_target': Dh_sc},
                          N_nodes_discard=10.,
                          N_min_Dh_main=10,)

# generate a bunch
bunch_for_map = machine.generate_6D_Gaussian_bunch(n_macroparticles=500000, intensity=1.15e11,
                                                   epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=0.2)

ecloud.track_once_and_replace_with_recorded_field_map(bunch_for_map)

machine.install_after_each_transverse_segment(ecloud)
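# --- Usage sketch (not part of the original script, assumptions noted): after
# --- track_once_and_replace_with_recorded_field_map, the dynamic e-cloud interaction
# --- is replaced by the field map recorded during that single passage, so multi-turn
# --- tracking becomes much cheaper (a "frozen" cloud). A witness bunch can then be
# --- tracked with the standard PyHEADTAIL calls; the number of turns is illustrative.
bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=50000, intensity=1.15e11,
                                           epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=0.2)
for i_turn in range(64):
    machine.track(bunch)  # each turn applies the recorded field-map kicks at every segment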
init_unif_edens = 1e11
N_MP_ele_init = 100000
N_mp_max = N_MP_ele_init * 4.
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

new_one_turn_map = []
ecloud_list = []
for ele in machine.one_turn_map:
    new_one_turn_map.append(ele)
    if ele in machine.transverse_map:
        new_ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
                                      L_ecloud=machine.circumference / machine.transverse_map.n_segments,
                                      slicer=None, Dt_ref=25e-12,
                                      pyecl_input_folder='./drift_sim',
                                      x_aper=x_aper, y_aper=y_aper, Dh_sc=Dh_sc,
                                      init_unif_edens_flag=init_unif_edens_flag,
                                      init_unif_edens=init_unif_edens,
                                      N_mp_max=N_mp_max,
                                      nel_mp_ref_0=nel_mp_ref_0,
                                      B_multip=B_multip)
        new_one_turn_map.append(new_ecloud)
        ecloud_list.append(new_ecloud)
machine.one_turn_map = new_one_turn_map

# generate a bunch
bunch_for_map = machine.generate_6D_Gaussian_bunch(n_macroparticles=500000, intensity=1.15e11,
                                                   epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=0.2)

slices_list_for_map = bunch.extract_slices(slicer)

for ec in ecloud_list:
def init_all(self):
    n_slices = 100
    z_cut = 2.5e-9 * c
    self.n_slices = n_slices
    self.z_cut = z_cut

    n_segments = 70
    from LHC import LHC
    self.machine = LHC(machine_configuration='Injection', n_segments=n_segments,
                       D_x=0., RF_at='end_of_transverse')

    # We assume that all the objects that cannot be slice-parallelized are at the end of the ring
    i_end_parallel = len(self.machine.one_turn_map) - 1  # only RF is not parallelizable

    # split the machine
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
    myid = self.ring_of_CPUs.myid
    i_start_part, i_end_part = sharing.my_part(myid)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
    if self.ring_of_CPUs.I_am_a_worker:
        print('I am id=%d (worker) and my part is %d long' % (myid, len(self.mypart)))
    elif self.ring_of_CPUs.I_am_the_master:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
        print('I am id=%d (master) and my part is %d long' % (myid, len(self.mypart)))

    # config e-cloud
    chamb_type = 'polyg'
    x_aper = 2.300000e-02
    y_aper = 1.800000e-02
    filename_chm = '../pyecloud_config/LHC_chm_ver.mat'
    B_multip_per_eV = [1.190000e-12]
    B_multip_per_eV = np.array(B_multip_per_eV)
    fraction_device = 0.65
    intensity = 1.150000e+11
    epsn_x = 2.5e-6
    epsn_y = 2.5e-6
    init_unif_edens_flag = 1
    init_unif_edens = 9.000000e+11
    N_MP_ele_init = 100000
    N_mp_max = N_MP_ele_init * 4.
    Dh_sc = .2e-3
    nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

    import PyECLOUD.PyEC4PyHT as PyEC4PyHT
    ecloud = PyEC4PyHT.Ecloud(
        L_ecloud=self.machine.circumference / n_segments,
        slicer=None,
        Dt_ref=10e-12,
        pyecl_input_folder='../pyecloud_config',
        chamb_type=chamb_type,
        x_aper=x_aper, y_aper=y_aper,
        filename_chm=filename_chm,
        Dh_sc=Dh_sc,
        init_unif_edens_flag=init_unif_edens_flag,
        init_unif_edens=init_unif_edens,
        N_mp_max=N_mp_max,
        nel_mp_ref_0=nel_mp_ref_0,
        B_multip=B_multip_per_eV * self.machine.p0 / e * c,
        slice_by_slice_mode=True)

    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
            my_new_part.append(ecloud_new)
            self.my_list_eclouds.append(ecloud_new)
    self.mypart = my_new_part
                   {'x': x_beam_offset, 'y': y_beam_offset + (2 * Dy_probe)},
                   {'x': x_beam_offset, 'y': y_beam_offset - (2 * Dy_probe)},
                   {'x': x_beam_offset + (2 * Dx_probe), 'y': y_beam_offset},
                   {'x': x_beam_offset - (2 * Dx_probe), 'y': y_beam_offset}]
n_probes = len(probes_position)

import PyECLOUD.PyEC4PyHT as PyEC4PyHT
ecloud_singlegrid = PyEC4PyHT.Ecloud(
    L_ecloud=L_ecloud, slicer=slicer, Dt_ref=20e-12,
    pyecl_input_folder='./pyecloud_config_LHC',
    chamb_type='polyg', filename_chm='LHC_chm_ver.mat',
    Dh_sc=.2 * bunch.sigma_x(),
    init_unif_edens_flag=1, init_unif_edens=1e7,
    N_mp_max=3000000, nel_mp_ref_0=1e7 / (0.7 * 3000000),
    B_multip=[0.],
    x_beam_offset=x_beam_offset, y_beam_offset=y_beam_offset,
    probes_position=probes_position,
    sparse_solver=sparse_solver)

ecloud_multigrid = PyEC4PyHT.Ecloud(
    L_ecloud=L_ecloud, slicer=slicer, Dt_ref=20e-12,
    pyecl_input_folder='./pyecloud_config_LHC',
    chamb_type='polyg', filename_chm='LHC_chm_ver.mat',
    Dh_sc=1e-3,
    init_unif_edens_flag=1, init_unif_edens=1e7,
    N_mp_max=3000000,
machine_configuration = 'HLLHC-injection'
machine = LHC(n_segments=1, machine_configuration=machine_configuration)

bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=300000, intensity=1.15e11,
                                           epsn_x=2.5e-6, epsn_y=2.5e-6, sigma_z=0.11)
bunch.x[bunch.z < 5e-2] += 1e-3

ecloud_ele = PyEC4PyHT.Ecloud(
    slice_by_slice_mode=True,
    L_ecloud=1., slicer=None, Dt_ref=25e-12,
    pyecl_input_folder='pyecloud_config',
)

n_slices = 150
z_cut = 2.5e-9 / 2 * clight
slicer = UniformBinSlicer(n_slices=n_slices, z_cuts=(-z_cut, z_cut))
slices_list_for_map = bunch.extract_slices(slicer)

ecloud_ele.save_ele_distributions_last_track = True
ecloud_ele.save_ele_field = True

ecloud_ele._reinitialize()

z_centers = []
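# --- Slice-by-slice tracking sketch (not part of the original script, assumptions
# --- noted): in slice_by_slice_mode the cloud must see the head of the bunch first,
# --- i.e. the slices returned by extract_slices are consumed from the right
# --- ("start popping from the right side"). mean_z() is the standard PyHEADTAIL
# --- way to get a slice's longitudinal position; the reversed loop below is a sketch.
for sl in slices_list_for_map[::-1]:
    ecloud_ele.track(sl)            # one e-cloud interaction per bunch slice
    z_centers.append(sl.mean_z())   # record the slice position for later plotting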
def init_all(self):
    self.n_slices = n_slices
    self.n_segments = n_segments

    from machines_for_testing import SPS
    self.machine = SPS(n_segments=n_segments,
                       machine_configuration='Q20-injection',
                       accQ_x=20., accQ_y=20.,
                       RF_at='end_of_transverse')

    # We assume that all the objects that cannot be slice-parallelized are at the end of the ring
    i_end_parallel = len(self.machine.one_turn_map) - 1  # only RF is not parallelizable

    # split the machine
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
    myid = self.ring_of_CPUs.myid
    i_start_part, i_end_part = sharing.my_part(myid)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
    if self.ring_of_CPUs.I_am_a_worker:
        print('I am id=%d (worker) and my part is %d long' % (myid, len(self.mypart)))
    elif self.ring_of_CPUs.I_am_the_master:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
        print('I am id=%d (master) and my part is %d long' % (myid, len(self.mypart)))

    # config e-cloud
    init_unif_edens_flag = 1
    init_unif_edens = 2e11
    N_MP_ele_init = 100000
    N_mp_max = N_MP_ele_init * 4.

    # define apertures and Dh_sc to simulate headtail
    inj_optics = self.machine.transverse_map.get_injection_optics()
    sigma_x = np.sqrt(inj_optics['beta_x'] * epsn_x / self.machine.betagamma)
    sigma_y = np.sqrt(inj_optics['beta_y'] * epsn_y / self.machine.betagamma)
    x_aper = 20 * sigma_x
    y_aper = 20 * sigma_y
    Dh_sc = 2 * x_aper / 128 / 2

    # initial MP size
    nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

    import PyECLOUD.PyEC4PyHT as PyEC4PyHT
    ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
                              L_ecloud=self.machine.circumference / n_segments, slicer=None,
                              Dt_ref=25e-12,
                              pyecl_input_folder='../../PyECLOUD/testing/tests_PyEC4PyHT/drift_sim/',
                              x_aper=x_aper, y_aper=y_aper, Dh_sc=Dh_sc,
                              init_unif_edens_flag=init_unif_edens_flag,
                              init_unif_edens=init_unif_edens,
                              N_MP_ele_init=N_MP_ele_init,
                              N_mp_max=N_mp_max,
                              nel_mp_ref_0=nel_mp_ref_0,
                              B_multip=B_multip)

    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
            my_new_part.append(ecloud_new)
            self.my_list_eclouds.append(ecloud_new)
    self.mypart = my_new_part
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
ecloud_multigrid = PyEC4PyHT.Ecloud(
    L_ecloud=L_ecloud, slicer=slicer, Dt_ref=20e-12,
    pyecl_input_folder='./pyecloud_config_LHC',
    chamb_type='polyg', filename_chm='LHC_chm_ver.mat',
    #chamb_type='ellip', x_aper=2e-2, y_aper=2e-2,
    Dh_sc=1e-3,
    init_unif_edens_flag=1, init_unif_edens=1e7,
    N_mp_max=3000000,
    nel_mp_ref_0=1e7 * np.pi * 2e-2**2 / (0.7 * 3000000),
    B_multip=[0.],
    PyPICmode='ShortleyWeller_WithTelescopicGrids',
    f_telescope=0.3,
    target_grid={
        'x_min_target': -5 * bunch.sigma_x(),
        'x_max_target': 5 * bunch.sigma_x(),
        'y_min_target': -5 * bunch.sigma_y(),
        'y_max_target': 5 * bunch.sigma_y(),
        'Dh_target': .1 * bunch.sigma_x()
    },
    N_nodes_discard=10.,
    N_min_Dh_main=10,
    sparse_solver=sparse_solver,
    verbose=True)

ecloud_multigrid.save_ele_distributions_last_track = True
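# --- Inspection sketch (not part of the original script, assumptions noted): with
# --- save_ele_distributions_last_track enabled, a single passage of the sliced bunch
# --- through the cloud fills the "last track" buffers, which can then be inspected.
# --- The attribute name rho_ele_last_track is an assumption to be verified against
# --- the PyEC4PyHT examples.
ecloud_multigrid.track(bunch)                       # one bunch passage through the cloud
rho_frames = ecloud_multigrid.rho_ele_last_track    # assumed: one electron-density frame per bunch slice
print('Recorded %d electron-density frames' % len(rho_frames))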
def init_all(self):
    self.n_slices = pp.n_slices

    # read the optics if needed
    if pp.optics_pickle_file is not None:
        with open(pp.optics_pickle_file) as fid:
            optics = pickle.load(fid)
        self.n_kick_smooth = np.sum(['_kick_smooth_' in nn for nn in optics['name']])
    else:
        optics = None
        self.n_kick_smooth = pp.n_segments

    # define the machine
    from LHC_custom import LHC
    self.machine = LHC(n_segments=pp.n_segments,
                       machine_configuration=pp.machine_configuration,
                       beta_x=pp.beta_x, beta_y=pp.beta_y,
                       accQ_x=pp.Q_x, accQ_y=pp.Q_y,
                       Qp_x=pp.Qp_x, Qp_y=pp.Qp_y,
                       octupole_knob=pp.octupole_knob,
                       optics_dict=optics,
                       V_RF=pp.V_RF)
    self.n_segments = self.machine.transverse_map.n_segments

    # compute sigma
    inj_opt = self.machine.transverse_map.get_injection_optics()
    sigma_x_inj = np.sqrt(inj_opt['beta_x'] * pp.epsn_x / self.machine.betagamma)
    sigma_y_inj = np.sqrt(inj_opt['beta_y'] * pp.epsn_y / self.machine.betagamma)

    if pp.optics_pickle_file is None:
        sigma_x_smooth = sigma_x_inj
        sigma_y_smooth = sigma_y_inj
    else:
        beta_x_smooth = None
        beta_y_smooth = None
        for ele in self.machine.one_turn_map:
            if ele in self.machine.transverse_map:
                if '_kick_smooth_' in ele.name1:
                    if beta_x_smooth is None:
                        beta_x_smooth = ele.beta_x1
                        beta_y_smooth = ele.beta_y1
                    else:
                        if beta_x_smooth != ele.beta_x1 or beta_y_smooth != ele.beta_y1:
                            raise ValueError('Smooth kicks must have all the same beta')

        if beta_x_smooth is None:
            sigma_x_smooth = None
            sigma_y_smooth = None
        else:
            sigma_x_smooth = np.sqrt(beta_x_smooth * pp.epsn_x / self.machine.betagamma)
            sigma_y_smooth = np.sqrt(beta_y_smooth * pp.epsn_y / self.machine.betagamma)

    # define MP size
    nel_mp_ref_0 = pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper / pp.N_MP_ele_init_dip

    # prepare e-cloud
    import PyECLOUD.PyEC4PyHT as PyEC4PyHT

    if pp.custom_target_grid_arcs is not None:
        target_grid_arcs = pp.custom_target_grid_arcs
    else:
        target_grid_arcs = {
            'x_min_target': -pp.target_size_internal_grid_sigma * sigma_x_smooth,
            'x_max_target': pp.target_size_internal_grid_sigma * sigma_x_smooth,
            'y_min_target': -pp.target_size_internal_grid_sigma * sigma_y_smooth,
            'y_max_target': pp.target_size_internal_grid_sigma * sigma_y_smooth,
            'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_x_smooth
        }
    self.target_grid_arcs = target_grid_arcs

    if pp.enable_arc_dip:
        ecloud_dip = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / self.n_kick_smooth * pp.fraction_device_dip,
            slicer=None,
            Dt_ref=pp.Dt_ref,
            pyecl_input_folder=pp.pyecl_input_folder,
            chamb_type=pp.chamb_type,
            x_aper=pp.x_aper, y_aper=pp.y_aper,
            filename_chm=pp.filename_chm,
            PyPICmode=pp.PyPICmode,
            Dh_sc=pp.Dh_sc_ext,
            N_min_Dh_main=pp.N_min_Dh_main,
            f_telescope=pp.f_telescope,
            N_nodes_discard=pp.N_nodes_discard,
            target_grid=target_grid_arcs,
            init_unif_edens_flag=pp.init_unif_edens_flag_dip,
            init_unif_edens=pp.init_unif_edens_dip,
            N_mp_max=pp.N_mp_max_dip,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=pp.B_multip_dip,
            enable_kick_x=pp.enable_kick_x,
            enable_kick_y=pp.enable_kick_y)

    if pp.enable_arc_quad:
        ecloud_quad = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / self.n_kick_smooth * pp.fraction_device_quad,
            slicer=None,
            Dt_ref=pp.Dt_ref,
            pyecl_input_folder=pp.pyecl_input_folder,
            chamb_type=pp.chamb_type,
            x_aper=pp.x_aper, y_aper=pp.y_aper,
            filename_chm=pp.filename_chm,
            PyPICmode=pp.PyPICmode,
            Dh_sc=pp.Dh_sc_ext,
            N_min_Dh_main=pp.N_min_Dh_main,
            f_telescope=pp.f_telescope,
            N_nodes_discard=pp.N_nodes_discard,
            target_grid=target_grid_arcs,
            N_mp_max=pp.N_mp_max_quad,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=pp.B_multip_quad,
            filename_init_MP_state=pp.filename_init_MP_state_quad,
            enable_kick_x=pp.enable_kick_x,
            enable_kick_y=pp.enable_kick_y)

    if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
        with open('multigrid_config_dip.txt', 'w') as fid:
            if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
            else:
                fid.write("Single grid.")

        with open('multigrid_config_dip.pkl', 'wb') as fid:
            if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
            else:
                pickle.dump('Single grid.', fid)

    if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
        with open('multigrid_config_quad.txt', 'w') as fid:
            if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
            else:
                fid.write("Single grid.")

        with open('multigrid_config_quad.pkl', 'wb') as fid:
            if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
            else:
                pickle.dump('Single grid.', fid)

    # setup transverse losses (to "protect" the ecloud)
    import PyHEADTAIL.aperture.aperture as aperture
    apt_xy = aperture.EllipticalApertureXY(
        x_aper=pp.target_size_internal_grid_sigma * sigma_x_inj,
        y_aper=pp.target_size_internal_grid_sigma * sigma_y_inj)
    self.machine.one_turn_map.append(apt_xy)

    if pp.enable_transverse_damper:
        # setup transverse damper
        from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
        damper = TransverseDamper(dampingrate_x=pp.dampingrate_x,
                                  dampingrate_y=pp.dampingrate_y)
        self.machine.one_turn_map.append(damper)

    # We assume that all the objects that cannot be slice-parallelized are at the end of the ring
    i_end_parallel = len(self.machine.one_turn_map) - pp.n_non_parallelizable

    # split the machine
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
    myid = self.ring_of_CPUs.myid
    i_start_part, i_end_part = sharing.my_part(myid)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
    if self.ring_of_CPUs.I_am_a_worker:
        print('I am id=%d/%d (worker) and my part is %d long' % (
            myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
    elif self.ring_of_CPUs.I_am_the_master:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
        print('I am id=%d/%d (master) and my part is %d long' % (
            myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

    # install eclouds in my part
    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            if pp.optics_pickle_file is None or '_kick_smooth_' in ele.name1:
                if pp.enable_arc_dip:
                    ecloud_dip_new = ecloud_dip.generate_twin_ecloud_with_shared_space_charge()
                    my_new_part.append(ecloud_dip_new)
                    self.my_list_eclouds.append(ecloud_dip_new)
                if pp.enable_arc_quad:
                    ecloud_quad_new = ecloud_quad.generate_twin_ecloud_with_shared_space_charge()
                    my_new_part.append(ecloud_quad_new)
                    self.my_list_eclouds.append(ecloud_quad_new)
            elif '_kick_element_' in ele.name1 and pp.enable_eclouds_at_kick_elements:
                i_in_optics = list(optics['name']).index(ele.name1)
                kick_name = optics['name'][i_in_optics]
                element_name = kick_name.split('_kick_element_')[-1]
                L_curr = optics['L_interaction'][i_in_optics]

                buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                    '!!!NAME!!!', element_name)
                chamber_fname = '%s_chamber.mat' % (element_name)

                B_multip_curr = [0., optics['gradB'][i_in_optics]]

                x_beam_offset = optics['x'][i_in_optics] * pp.orbit_factor
                y_beam_offset = optics['y'][i_in_optics] * pp.orbit_factor

                sigma_x_local = np.sqrt(optics['beta_x'][i_in_optics] * pp.epsn_x /
                                        self.machine.betagamma)
                sigma_y_local = np.sqrt(optics['beta_y'][i_in_optics] * pp.epsn_y /
                                        self.machine.betagamma)

                ecloud_ele = PyEC4PyHT.Ecloud(
                    slice_by_slice_mode=True,
                    L_ecloud=L_curr,
                    slicer=None,
                    Dt_ref=pp.Dt_ref,
                    pyecl_input_folder=pp.pyecl_input_folder,
                    chamb_type='polyg',
                    x_aper=None, y_aper=None,
                    filename_chm=buildup_folder + '/' + chamber_fname,
                    PyPICmode=pp.PyPICmode,
                    Dh_sc=pp.Dh_sc_ext,
                    N_min_Dh_main=pp.N_min_Dh_main,
                    f_telescope=pp.f_telescope,
                    N_nodes_discard=pp.N_nodes_discard,
                    target_grid={
                        'x_min_target': -pp.target_size_internal_grid_sigma * sigma_x_local + x_beam_offset,
                        'x_max_target': pp.target_size_internal_grid_sigma * sigma_x_local + x_beam_offset,
                        'y_min_target': -pp.target_size_internal_grid_sigma * sigma_y_local + y_beam_offset,
                        'y_max_target': pp.target_size_internal_grid_sigma * sigma_y_local + y_beam_offset,
                        'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_y_local
                    },
                    N_mp_max=pp.N_mp_max_quad,
                    nel_mp_ref_0=nel_mp_ref_0,
                    B_multip=B_multip_curr,
                    filename_init_MP_state=buildup_folder + '/' + pp.name_MP_state_file_kick_elements,
                    x_beam_offset=x_beam_offset,
                    y_beam_offset=y_beam_offset,
                    enable_kick_x=pp.enable_kick_x,
                    enable_kick_y=pp.enable_kick_y)

                my_new_part.append(ecloud_ele)
                self.my_list_eclouds.append(ecloud_ele)
    self.mypart = my_new_part

    if pp.footprint_mode:
        print('Proc. %d computing maps' % myid)
        # generate a bunch
        bunch_for_map = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=pp.n_macroparticles_for_footprint_map,
            intensity=pp.intensity,
            epsn_x=pp.epsn_x,
            epsn_y=pp.epsn_y,
            sigma_z=pp.sigma_z)

        # Slice the bunch
        slicer_for_map = UniformBinSlicer(n_slices=pp.n_slices, z_cuts=(-pp.z_cut, pp.z_cut))
        slices_list_for_map = bunch_for_map.extract_slices(slicer_for_map)

        # Track the previous part of the machine
        for ele in self.machine.one_turn_map[:i_start_part]:
            for ss in slices_list_for_map:
                ele.track(ss)

        # Measure optics, track and replace clouds with maps
        list_ele_type = []
        list_meas_beta_x = []
        list_meas_alpha_x = []
        list_meas_beta_y = []
        list_meas_alpha_y = []
        for ele in self.mypart:
            list_ele_type.append(str(type(ele)))
            # Measure optics
            bbb = sum(slices_list_for_map)
            list_meas_beta_x.append(bbb.beta_Twiss_x())
            list_meas_alpha_x.append(bbb.alpha_Twiss_x())
            list_meas_beta_y.append(bbb.beta_Twiss_y())
            list_meas_alpha_y.append(bbb.alpha_Twiss_y())

            if ele in self.my_list_eclouds:
                ele.track_once_and_replace_with_recorded_field_map(slices_list_for_map)
            else:
                for ss in slices_list_for_map:
                    ele.track(ss)
        print('Proc. %d done with maps' % myid)

        with open('measured_optics_%d.pkl' % myid, 'wb') as fid:
            pickle.dump({
                'ele_type': list_ele_type,
                'beta_x': list_meas_beta_x,
                'alpha_x': list_meas_alpha_x,
                'beta_y': list_meas_beta_y,
                'alpha_y': list_meas_alpha_y,
            }, fid)

        # remove RF
        if self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part.remove(self.machine.longitudinal_map)
def _generate_parent_eclouds(self):
    pp = self.pp

    sigma_x_inj = self.sigma_x_inj
    sigma_y_inj = self.sigma_y_inj
    sigma_x_smooth = self.sigma_x_smooth
    sigma_y_smooth = self.sigma_y_smooth

    # prepare e-cloud
    import PyECLOUD.PyEC4PyHT as PyEC4PyHT

    if pp.custom_target_grid_arcs is not None:
        target_grid_arcs = pp.custom_target_grid_arcs
    else:
        target_grid_arcs = {
            "x_min_target": -pp.target_size_internal_grid_sigma * sigma_x_smooth,
            "x_max_target": pp.target_size_internal_grid_sigma * sigma_x_smooth,
            "y_min_target": -pp.target_size_internal_grid_sigma * sigma_y_smooth,
            "y_max_target": pp.target_size_internal_grid_sigma * sigma_y_smooth,
            "Dh_target": pp.target_Dh_internal_grid_sigma * sigma_x_smooth,
        }
    self.target_grid_arcs = target_grid_arcs

    self.parent_eclouds = []

    # define MP size
    nel_mp_ref_0 = (pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper /
                    pp.N_MP_ele_init_dip)

    if pp.enable_arc_dip:
        ecloud_dip = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / self.n_kick_smooth * pp.fraction_device_dip,
            slicer=None,
            Dt_ref=pp.Dt_ref,
            pyecl_input_folder=pp.pyecl_input_folder,
            chamb_type=pp.chamb_type,
            x_aper=pp.x_aper,
            y_aper=pp.y_aper,
            filename_chm=pp.filename_chm,
            PyPICmode=pp.PyPICmode,
            Dh_sc=pp.Dh_sc_ext,
            N_min_Dh_main=pp.N_min_Dh_main,
            f_telescope=pp.f_telescope,
            N_nodes_discard=pp.N_nodes_discard,
            target_grid=target_grid_arcs,
            init_unif_edens_flag=pp.init_unif_edens_flag_dip,
            init_unif_edens=pp.init_unif_edens_dip,
            N_mp_max=pp.N_mp_max_dip,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=pp.B_multip_dip,
            enable_kick_x=pp.enable_kick_x,
            enable_kick_y=pp.enable_kick_y,
            force_interp_at_substeps_interacting_slices=pp.force_interp_at_substeps_interacting_slices,
        )
        self.parent_eclouds.append(ecloud_dip)

    if pp.enable_arc_quad:
        ecloud_quad = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / self.n_kick_smooth * pp.fraction_device_quad,
            slicer=None,
            Dt_ref=pp.Dt_ref,
            pyecl_input_folder=pp.pyecl_input_folder,
            chamb_type=pp.chamb_type,
            x_aper=pp.x_aper,
            y_aper=pp.y_aper,
            filename_chm=pp.filename_chm,
            PyPICmode=pp.PyPICmode,
            Dh_sc=pp.Dh_sc_ext,
            N_min_Dh_main=pp.N_min_Dh_main,
            f_telescope=pp.f_telescope,
            N_nodes_discard=pp.N_nodes_discard,
            target_grid=target_grid_arcs,
            N_mp_max=pp.N_mp_max_quad,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=pp.B_multip_quad,
            filename_init_MP_state=pp.filename_init_MP_state_quad,
            enable_kick_x=pp.enable_kick_x,
            enable_kick_y=pp.enable_kick_y,
            force_interp_at_substeps_interacting_slices=pp.force_interp_at_substeps_interacting_slices,
        )
        self.parent_eclouds.append(ecloud_quad)

    if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
        with open("multigrid_config_dip.txt", "w") as fid:
            if hasattr(ecloud_dip.spacech_ele.PyPICobj, "grids"):
                fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
            else:
                fid.write("Single grid.")

        with open("multigrid_config_dip.pkl", "wb") as fid:
            if hasattr(ecloud_dip.spacech_ele.PyPICobj, "grids"):
                pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
            else:
                pickle.dump("Single grid.", fid)

    if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
        with open("multigrid_config_quad.txt", "w") as fid:
            if hasattr(ecloud_quad.spacech_ele.PyPICobj, "grids"):
                fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
            else:
                fid.write("Single grid.")

        with open("multigrid_config_quad.pkl", "wb") as fid:
            if hasattr(ecloud_quad.spacech_ele.PyPICobj, "grids"):
                pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
            else:
                pickle.dump("Single grid.", fid)
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

new_one_turn_map = []
ecloud_list = []
for ele in machine.one_turn_map:
    new_one_turn_map.append(ele)
    if ele in machine.transverse_map:
        new_ecloud = PyEC4PyHT.Ecloud(
            L_ecloud=machine.circumference / N_kicks, slicer=slicer,
            Dt_ref=25e-12, pyecl_input_folder='./drift_sim',
            x_aper=x_aper, y_aper=y_aper, Dh_sc=Dh_sc,
            init_unif_edens_flag=init_unif_edens_flag,
            init_unif_edens=init_unif_edens,
            N_MP_ele_init=N_MP_ele_init,
            N_mp_max=N_mp_max,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=B_multip,
            slice_by_slice_mode=True)
        new_one_turn_map.append(new_ecloud)
        ecloud_list.append(new_ecloud)
machine.one_turn_map = new_one_turn_map

# generate a bunch
bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=30000, intensity=1.15e11,
def _install_eclouds_in_machine_part(self):
    # install eclouds in my part
    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            if not self.optics_from_pickle or "_kick_smooth_" in ele.name1:
                for ee in self.parent_eclouds:
                    ecloud_new = ee.generate_twin_ecloud_with_shared_space_charge()
                    my_new_part.append(ecloud_new)
                    self.my_list_eclouds.append(ecloud_new)
            elif ("_kick_element_" in ele.name1
                  and pp.enable_eclouds_at_kick_elements):
                i_in_optics = list(optics["name"]).index(ele.name1)
                kick_name = optics["name"][i_in_optics]
                element_name = kick_name.split("_kick_element_")[-1]
                L_curr = optics["L_interaction"][i_in_optics]

                buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                    "!!!NAME!!!", element_name)
                chamber_fname = "%s_chamber.mat" % (element_name)

                B_multip_curr = [0.0, optics["gradB"][i_in_optics]]

                x_beam_offset = optics["x"][i_in_optics] * pp.orbit_factor
                y_beam_offset = optics["y"][i_in_optics] * pp.orbit_factor

                sigma_x_local = np.sqrt(optics["beta_x"][i_in_optics] * pp.epsn_x /
                                        self.machine.betagamma)
                sigma_y_local = np.sqrt(optics["beta_y"][i_in_optics] * pp.epsn_y /
                                        self.machine.betagamma)

                ecloud_ele = PyEC4PyHT.Ecloud(
                    slice_by_slice_mode=True,
                    L_ecloud=L_curr,
                    slicer=None,
                    Dt_ref=pp.Dt_ref,
                    pyecl_input_folder=pp.pyecl_input_folder,
                    chamb_type="polyg",
                    x_aper=None,
                    y_aper=None,
                    filename_chm=buildup_folder + "/" + chamber_fname,
                    PyPICmode=pp.PyPICmode,
                    Dh_sc=pp.Dh_sc_ext,
                    N_min_Dh_main=pp.N_min_Dh_main,
                    f_telescope=pp.f_telescope,
                    N_nodes_discard=pp.N_nodes_discard,
                    target_grid={
                        "x_min_target": -pp.target_size_internal_grid_sigma * sigma_x_local + x_beam_offset,
                        "x_max_target": pp.target_size_internal_grid_sigma * sigma_x_local + x_beam_offset,
                        "y_min_target": -pp.target_size_internal_grid_sigma * sigma_y_local + y_beam_offset,
                        "y_max_target": pp.target_size_internal_grid_sigma * sigma_y_local + y_beam_offset,
                        "Dh_target": pp.target_Dh_internal_grid_sigma * sigma_y_local,
                    },
                    N_mp_max=pp.N_mp_max_quad,
                    nel_mp_ref_0=nel_mp_ref_0,
                    B_multip=B_multip_curr,
                    filename_init_MP_state=buildup_folder + "/" + pp.name_MP_state_file_kick_elements,
                    x_beam_offset=x_beam_offset,
                    y_beam_offset=y_beam_offset,
                    enable_kick_x=pp.enable_kick_x,
                    enable_kick_y=pp.enable_kick_y,
                    force_interp_at_substeps_interacting_slices=pp.force_interp_at_substeps_interacting_slices,
                )
                my_new_part.append(ecloud_ele)
                self.my_list_eclouds.append(ecloud_ele)
    self.mypart = my_new_part
Dh_sc = .2e-3

# define MP size
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

# define an electron cloud
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
from PyHEADTAIL.particles.slicing import UniformBinSlicer
slicer = UniformBinSlicer(n_slices=64, n_sigma_z=2.)
ecloud = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / n_segments, slicer=slicer,
                          Dt_ref=10e-12, pyecl_input_folder='./pyecloud_config',
                          chamb_type=chamb_type,
                          x_aper=x_aper, y_aper=y_aper,
                          filename_chm=filename_chm, Dh_sc=Dh_sc,
                          init_unif_edens_flag=init_unif_edens_flag,
                          init_unif_edens=init_unif_edens,
                          N_mp_max=N_mp_max,
                          nel_mp_ref_0=nel_mp_ref_0,
                          B_multip=B_multip_per_eV * machine.p0 / e * c)

# install ecloud in the machine
machine.install_after_each_transverse_segment(ecloud)

# setup transverse losses (to "protect" the ecloud)
import PyHEADTAIL.aperture.aperture as aperture
apt_xy = aperture.EllipticalApertureXY(x_aper=ecloud.impact_man.chamb.x_aper,
                                       y_aper=ecloud.impact_man.chamb.y_aper)
machine.one_turn_map.append(apt_xy)
from PyHEADTAIL.particles.slicing import UniformBinSlicer
slicer = UniformBinSlicer(n_slices=64, n_sigma_z=3.)

init_unif_edens_flag = 1
init_unif_edens = 2e11
N_MP_ele_init = 100000
N_mp_max = N_MP_ele_init * 4.
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

ecloud = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / N_kicks, slicer=slicer,
                          Dt_ref=25e-12,
                          pyecl_input_folder='../../PyECLOUD/testing/tests_PyEC4PyHT/drift_sim/',
                          x_aper=x_aper, y_aper=y_aper, Dh_sc=Dh_sc,
                          init_unif_edens_flag=init_unif_edens_flag,
                          init_unif_edens=init_unif_edens,
                          N_MP_ele_init=N_MP_ele_init,
                          N_mp_max=N_mp_max,
                          nel_mp_ref_0=nel_mp_ref_0,
                          B_multip=B_multip)

machine.install_after_each_transverse_segment(ecloud)

if show_movie:
    ecloud.save_ele_distributions_last_track = True
    ecloud.save_ele_potential_and_field = True

# generate a bunch
bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=300000, intensity=1.15e11,
                                           epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=0.2)

# replace first particles with HEADTAIL ones