def init_all(self):

    self.n_slices = n_slices
    self.n_segments = n_segments

    # define the machine
    from LHC_custom import LHC
    self.machine = LHC(n_segments=n_segments,
                       machine_configuration=machine_configuration)

    # define MP size
    nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

    # prepare e-cloud
    import PyECLOUD.PyEC4PyHT as PyEC4PyHT
    ecloud = PyEC4PyHT.Ecloud(
        slice_by_slice_mode=True,
        L_ecloud=self.machine.circumference / n_segments,
        slicer=None,
        Dt_ref=Dt_ref,
        pyecl_input_folder=pyecl_input_folder,
        chamb_type=chamb_type,
        x_aper=x_aper, y_aper=y_aper,
        filename_chm=filename_chm,
        Dh_sc=Dh_sc,
        init_unif_edens_flag=init_unif_edens_flag,
        init_unif_edens=init_unif_edens,
        N_mp_max=N_mp_max,
        nel_mp_ref_0=nel_mp_ref_0,
        B_multip=B_multip_per_eV * self.machine.p0 / e * c)

    # setup transverse losses (to "protect" the ecloud)
    import PyHEADTAIL.aperture.aperture as aperture
    apt_xy = aperture.EllipticalApertureXY(
        x_aper=ecloud.impact_man.chamb.x_aper,
        y_aper=ecloud.impact_man.chamb.y_aper)
    self.machine.one_turn_map.append(apt_xy)

    n_non_parallelizable = 2  # rf and aperture

    # We assume that all objects that cannot be slice-parallelized
    # are at the end of the ring
    i_end_parallel = len(self.machine.one_turn_map) - n_non_parallelizable

    # split the machine
    sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
    myid = self.ring_of_CPUs.myid
    i_start_part, i_end_part = sharing.my_part(myid)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
    if self.ring_of_CPUs.I_am_a_worker:
        print 'I am id=%d/%d (worker) and my part is %d long' % (
            myid, self.ring_of_CPUs.N_nodes, len(self.mypart))
    elif self.ring_of_CPUs.I_am_the_master:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
        print 'I am id=%d/%d (master) and my part is %d long' % (
            myid, self.ring_of_CPUs.N_nodes, len(self.mypart))

    # install eclouds in my part
    my_new_part = []
    self.my_list_eclouds = []
    for ele in self.mypart:
        my_new_part.append(ele)
        if ele in self.machine.transverse_map:
            # ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
            ecloud_new = DummyEcloud()
            my_new_part.append(ecloud_new)
            self.my_list_eclouds.append(ecloud_new)
    self.mypart = my_new_part
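# ---------------------------------------------------------------------------
# The snippets in this section use module-level names (np, shs, ch, e, c,
# pickle, h5py, os, UniformBinSlicer, ParticleTrajectories) that are imported
# elsewhere in the original files; plausible imports would be
#     import numpy as np
#     from scipy.constants import c, e
#     import PyPARIS.share_segments as shs
#     import PyPARIS.communication_helpers as ch
# (the PyPARIS module paths are assumptions). DummyEcloud is also undefined
# in the snippet above; a minimal no-op stand-in that satisfies the track()
# interface used by treat_piece(), e.g. to test the parallel splitting
# without actual electron clouds, could look like this (a sketch, not part
# of the original code):
# ---------------------------------------------------------------------------
class DummyEcloud(object):

    def track(self, beam):
        # intentionally do nothing: placeholder for a PyECLOUD kick
        pass

    def finalize_and_reinitialize(self):
        # matches the interface invoked when clouds are reset between turns
        pass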
N_mp_max = N_MP_ele_init * 4.

x_kick_in_sigmas = 0.1
y_kick_in_sigmas = 0.1

chamb_type = 'polyg'
x_aper = 2.300000e-02
y_aper = 1.800000e-02
filename_chm = 'LHC_chm_ver.mat'
B_multip_per_eV = [1.190000e-12]
B_multip_per_eV = np.array(B_multip_per_eV)

# define the machine
from LHC_custom import LHC
machine = LHC(n_segments=n_segments,
              machine_configuration=machine_configuration)

# compute sigma x and y
inj_opt = machine.transverse_map.get_injection_optics()
sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / machine.betagamma)
sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / machine.betagamma)

x_kick = x_kick_in_sigmas * sigma_x
y_kick = y_kick_in_sigmas * sigma_y

# define PIC grid size
Dh_sc = .2e-3

# define MP size
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init
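# ---------------------------------------------------------------------------
# Quick numerical check of the beam-size formula used above,
# sigma = sqrt(beta * epsn / betagamma), with typical LHC injection values
# (beta ~ 85 m, epsn = 2.5e-6 m rad, 450 GeV protons); the numbers are
# illustrative assumptions, not the original settings.
# ---------------------------------------------------------------------------
import numpy as np

beta_x_example = 85.                      # [m] (assumed)
epsn_x_example = 2.5e-6                   # [m rad] (assumed)
betagamma_example = 450e9 / 938.272e6     # relativistic beta ~ 1 at 450 GeV

sigma_x_example = np.sqrt(beta_x_example * epsn_x_example / betagamma_example)
print(sigma_x_example)  # ~ 6.7e-4 m, i.e. about 0.7 mm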
class Simulation(object):
    def __init__(self):
        self.N_turns = N_turns

    def init_all(self):

        self.n_slices = n_slices
        self.n_segments = n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=n_segments,
                           machine_configuration=machine_configuration)

        # define MP size
        nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / n_segments,
            slicer=None,
            Dt_ref=Dt_ref,
            pyecl_input_folder=pyecl_input_folder,
            chamb_type=chamb_type,
            x_aper=x_aper, y_aper=y_aper,
            filename_chm=filename_chm,
            Dh_sc=Dh_sc,
            init_unif_edens_flag=init_unif_edens_flag,
            init_unif_edens=init_unif_edens,
            N_mp_max=N_mp_max,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=B_multip_per_eV * self.machine.p0 / e * c)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(x_aper=x_aper, y_aper=y_aper)
        self.machine.one_turn_map.append(apt_xy)

        n_non_parallelizable = 2  # rf and aperture

        # We assume that all objects that cannot be slice-parallelized
        # are at the end of the ring
        i_end_parallel = len(self.machine.one_turn_map) - n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print 'I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print 'I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))

        # install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
        self.mypart = my_new_part

    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z)
        print 'Bunch initialized.'

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution', N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)
        pieces_to_be_treated = slice_obj_list

        print 'N_turns', self.N_turns

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        bunch = sum(pieces_treated)

        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(bunch)

        # save results
        # print '%s Turn %d' % (time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = bunch.extract_slices(self.slicer)

        orders_to_pass = ['reset_clouds']

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        pass

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
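# ---------------------------------------------------------------------------
# Sketch of the launcher that drives a Simulation object of this kind,
# assuming the PyPARIS RingOfCPUs interface (the import path is an
# assumption): the master calls init_master() and obtains the first list of
# slices, the pieces then circulate through treat_piece() on all nodes, each
# turn is closed by finalize_turn_on_master(), and its orders reach every
# node through execute_orders_from_master().
# ---------------------------------------------------------------------------
from PyPARIS.ring_of_CPUs import RingOfCPUs

sim_content = Simulation()
myCPUring = RingOfCPUs(sim_content)
myCPUring.run()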
chamb_type = 'polyg'
x_aper = 2.300000e-02
y_aper = 1.800000e-02
filename_chm = 'LHC_chm_ver.mat'
B_multip_per_eV = [1.190000e-12]
B_multip_per_eV = np.array(B_multip_per_eV)

# define the machine
from LHC_custom import LHC
machine = LHC(n_segments=n_segments,
              machine_configuration=machine_configuration)

# compute sigma x and y
sigma_x = np.sqrt(machine.beta_x[0] * epsn_x / machine.betagamma)
sigma_y = np.sqrt(machine.beta_y[0] * epsn_y / machine.betagamma)

x_kick = x_kick_in_sigmas * sigma_x
y_kick = y_kick_in_sigmas * sigma_y

# define PIC grid size
Dh_sc = .2e-3

# define MP size
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

# define an electron cloud
class Simulation(object):
    def __init__(self):
        self.N_turns = pp.N_turns
        self.pp = pp

    def init_all(self):

        self.n_slices = pp.n_slices

        # read the optics if needed
        if pp.optics_pickle_file is not None:
            with open(pp.optics_pickle_file) as fid:
                optics = pickle.load(fid)
            self.n_kick_smooth = np.sum(
                ['_kick_smooth_' in nn for nn in optics['name']])
        else:
            optics = None
            self.n_kick_smooth = pp.n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=pp.n_segments,
                           machine_configuration=pp.machine_configuration,
                           beta_x=pp.beta_x, beta_y=pp.beta_y,
                           accQ_x=pp.Q_x, accQ_y=pp.Q_y,
                           Qp_x=pp.Qp_x, Qp_y=pp.Qp_y,
                           octupole_knob=pp.octupole_knob,
                           optics_dict=optics,
                           V_RF=pp.V_RF)
        self.n_segments = self.machine.transverse_map.n_segments

        # compute sigma
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x_inj = np.sqrt(inj_opt['beta_x'] * pp.epsn_x
                              / self.machine.betagamma)
        sigma_y_inj = np.sqrt(inj_opt['beta_y'] * pp.epsn_y
                              / self.machine.betagamma)

        if pp.optics_pickle_file is None:
            sigma_x_smooth = sigma_x_inj
            sigma_y_smooth = sigma_y_inj
        else:
            beta_x_smooth = None
            beta_y_smooth = None
            for ele in self.machine.one_turn_map:
                if ele in self.machine.transverse_map:
                    if '_kick_smooth_' in ele.name1:
                        if beta_x_smooth is None:
                            beta_x_smooth = ele.beta_x1
                            beta_y_smooth = ele.beta_y1
                        else:
                            if (beta_x_smooth != ele.beta_x1
                                    or beta_y_smooth != ele.beta_y1):
                                raise ValueError(
                                    'Smooth kicks must have all the same beta')

            if beta_x_smooth is None:
                sigma_x_smooth = None
                sigma_y_smooth = None
            else:
                sigma_x_smooth = np.sqrt(beta_x_smooth * pp.epsn_x
                                         / self.machine.betagamma)
                sigma_y_smooth = np.sqrt(beta_y_smooth * pp.epsn_y
                                         / self.machine.betagamma)

        # define MP size
        nel_mp_ref_0 = (pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper
                        / pp.N_MP_ele_init_dip)

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT

        if pp.custom_target_grid_arcs is not None:
            target_grid_arcs = pp.custom_target_grid_arcs
        else:
            target_grid_arcs = {
                'x_min_target': -pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'x_max_target': pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'y_min_target': -pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'y_max_target': pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_x_smooth}
        self.target_grid_arcs = target_grid_arcs

        if pp.enable_arc_dip:
            ecloud_dip = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=(self.machine.circumference / self.n_kick_smooth
                          * pp.fraction_device_dip),
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper, y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                init_unif_edens_flag=pp.init_unif_edens_flag_dip,
                init_unif_edens=pp.init_unif_edens_dip,
                N_mp_max=pp.N_mp_max_dip,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_dip,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if pp.enable_arc_quad:
            ecloud_quad = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=(self.machine.circumference / self.n_kick_smooth
                          * pp.fraction_device_quad),
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper, y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                N_mp_max=pp.N_mp_max_quad,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_quad,
                filename_init_MP_state=pp.filename_init_MP_state_quad,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
            with open('multigrid_config_dip.txt', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")
            with open('multigrid_config_dip.pkl', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
            with open('multigrid_config_quad.txt', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")
            with open('multigrid_config_quad.pkl', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=pp.target_size_internal_grid_sigma * sigma_x_inj,
            y_aper=pp.target_size_internal_grid_sigma * sigma_y_inj)
        self.machine.one_turn_map.append(apt_xy)

        if pp.enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=pp.dampingrate_x,
                                      dampingrate_y=pp.dampingrate_y)
            self.machine.one_turn_map.append(damper)

        # We assume that all objects that cannot be slice-parallelized
        # are at the end of the ring
        i_end_parallel = (len(self.machine.one_turn_map)
                          - pp.n_non_parallelizable)

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print 'I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print 'I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))

        # install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                if pp.optics_pickle_file is None or '_kick_smooth_' in ele.name1:
                    if pp.enable_arc_dip:
                        ecloud_dip_new = \
                            ecloud_dip.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_dip_new)
                        self.my_list_eclouds.append(ecloud_dip_new)
                    if pp.enable_arc_quad:
                        ecloud_quad_new = \
                            ecloud_quad.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_quad_new)
                        self.my_list_eclouds.append(ecloud_quad_new)
                elif ('_kick_element_' in ele.name1
                        and pp.enable_eclouds_at_kick_elements):

                    i_in_optics = list(optics['name']).index(ele.name1)
                    kick_name = optics['name'][i_in_optics]
                    element_name = kick_name.split('_kick_element_')[-1]
                    L_curr = optics['L_interaction'][i_in_optics]

                    buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                        '!!!NAME!!!', element_name)
                    chamber_fname = '%s_chamber.mat' % (element_name)

                    B_multip_curr = [0., optics['gradB'][i_in_optics]]

                    x_beam_offset = optics['x'][i_in_optics] * pp.orbit_factor
                    y_beam_offset = optics['y'][i_in_optics] * pp.orbit_factor

                    sigma_x_local = np.sqrt(optics['beta_x'][i_in_optics]
                                            * pp.epsn_x / self.machine.betagamma)
                    sigma_y_local = np.sqrt(optics['beta_y'][i_in_optics]
                                            * pp.epsn_y / self.machine.betagamma)

                    ecloud_ele = PyEC4PyHT.Ecloud(
                        slice_by_slice_mode=True,
                        L_ecloud=L_curr,
                        slicer=None,
                        Dt_ref=pp.Dt_ref,
                        pyecl_input_folder=pp.pyecl_input_folder,
                        chamb_type='polyg',
                        x_aper=None, y_aper=None,
                        filename_chm=buildup_folder + '/' + chamber_fname,
                        PyPICmode=pp.PyPICmode,
                        Dh_sc=pp.Dh_sc_ext,
                        N_min_Dh_main=pp.N_min_Dh_main,
                        f_telescope=pp.f_telescope,
                        N_nodes_discard=pp.N_nodes_discard,
                        target_grid={
                            'x_min_target': (-pp.target_size_internal_grid_sigma
                                             * sigma_x_local + x_beam_offset),
                            'x_max_target': (pp.target_size_internal_grid_sigma
                                             * sigma_x_local + x_beam_offset),
                            'y_min_target': (-pp.target_size_internal_grid_sigma
                                             * sigma_y_local + y_beam_offset),
                            'y_max_target': (pp.target_size_internal_grid_sigma
                                             * sigma_y_local + y_beam_offset),
                            'Dh_target': (pp.target_Dh_internal_grid_sigma
                                          * sigma_y_local)},
                        N_mp_max=pp.N_mp_max_quad,
                        nel_mp_ref_0=nel_mp_ref_0,
                        B_multip=B_multip_curr,
                        filename_init_MP_state=(buildup_folder + '/'
                            + pp.name_MP_state_file_kick_elements),
                        x_beam_offset=x_beam_offset,
                        y_beam_offset=y_beam_offset,
                        enable_kick_x=pp.enable_kick_x,
                        enable_kick_y=pp.enable_kick_y)

                    my_new_part.append(ecloud_ele)
                    self.my_list_eclouds.append(ecloud_ele)
        self.mypart = my_new_part

        if pp.footprint_mode:
            print 'Proc. %d computing maps' % myid

            # generate a bunch
            bunch_for_map = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_map,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)

            # Slice the bunch
            slicer_for_map = UniformBinSlicer(n_slices=pp.n_slices,
                                              z_cuts=(-pp.z_cut, pp.z_cut))
            slices_list_for_map = bunch_for_map.extract_slices(slicer_for_map)

            # Track the previous part of the machine
            for ele in self.machine.one_turn_map[:i_start_part]:
                for ss in slices_list_for_map:
                    ele.track(ss)

            # Measure optics, track and replace clouds with maps
            list_ele_type = []
            list_meas_beta_x = []
            list_meas_alpha_x = []
            list_meas_beta_y = []
            list_meas_alpha_y = []
            for ele in self.mypart:
                list_ele_type.append(str(type(ele)))

                # Measure optics
                bbb = sum(slices_list_for_map)
                list_meas_beta_x.append(bbb.beta_Twiss_x())
                list_meas_alpha_x.append(bbb.alpha_Twiss_x())
                list_meas_beta_y.append(bbb.beta_Twiss_y())
                list_meas_alpha_y.append(bbb.alpha_Twiss_y())

                if ele in self.my_list_eclouds:
                    ele.track_once_and_replace_with_recorded_field_map(
                        slices_list_for_map)
                else:
                    for ss in slices_list_for_map:
                        ele.track(ss)
            print 'Proc. %d done with maps' % myid

            with open('measured_optics_%d.pkl' % myid, 'wb') as fid:
                pickle.dump({
                    'ele_type': list_ele_type,
                    'beta_x': list_meas_beta_x,
                    'alpha_x': list_meas_alpha_x,
                    'beta_y': list_meas_beta_y,
                    'alpha_y': list_meas_alpha_y,
                }, fid)

            # remove RF
            if self.ring_of_CPUs.I_am_the_master:
                self.non_parallel_part.remove(self.machine.longitudinal_map)

    def init_master(self):

        # Manage multi-job operation
        if pp.footprint_mode:
            if pp.N_turns != pp.N_turns_target:
                raise ValueError(
                    'In footprint mode you need to set N_turns_target=N_turns_per_run!')

        import PyPARIS_sim_class.Save_Load_Status as SLS
        SimSt = SLS.SimulationStatus(N_turns_per_run=pp.N_turns,
                                     check_for_resubmit=True,
                                     N_turns_target=pp.N_turns_target)
        SimSt.before_simulation()
        self.SimSt = SimSt

        # generate a bunch
        if pp.footprint_mode:
            self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_track,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)
        elif SimSt.first_run:
            if pp.bunch_from_file is not None:
                print 'Loading bunch from file %s ...' % pp.bunch_from_file
                with h5py.File(pp.bunch_from_file, 'r') as fid:
                    self.bunch = self.buffer_to_piece(
                        np.array(fid['bunch']).copy())
                print 'Bunch loaded from file.\n'
            else:
                self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                    n_macroparticles=pp.n_macroparticles,
                    intensity=pp.intensity,
                    epsn_x=pp.epsn_x,
                    epsn_y=pp.epsn_y,
                    sigma_z=pp.sigma_z)

            # compute initial displacements
            inj_opt = self.machine.transverse_map.get_injection_optics()
            sigma_x = np.sqrt(inj_opt['beta_x'] * pp.epsn_x
                              / self.machine.betagamma)
            sigma_y = np.sqrt(inj_opt['beta_y'] * pp.epsn_y
                              / self.machine.betagamma)
            x_kick = pp.x_kick_in_sigmas * sigma_x
            y_kick = pp.y_kick_in_sigmas * sigma_y

            # apply initial displacement
            if not pp.footprint_mode:
                self.bunch.x += x_kick
                self.bunch.y += y_kick

            print 'Bunch initialized.'
        else:
            print 'Loading bunch from file...'
            with h5py.File('bunch_status_part%02d.h5'
                           % (SimSt.present_simulation_part - 1), 'r') as fid:
                self.bunch = self.buffer_to_piece(
                    np.array(fid['bunch']).copy())
            print 'Bunch loaded from file.'

        # initial slicing
        self.slicer = UniformBinSlicer(n_slices=pp.n_slices,
                                       z_cuts=(-pp.z_cut, pp.z_cut))

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        # define a slice monitor
        from PyHEADTAIL.monitors.monitors import SliceMonitor
        self.slice_monitor = SliceMonitor(
            'slice_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns, self.slicer, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        # slice for the first turn
        slice_obj_list = self.bunch.extract_slices(self.slicer)
        pieces_to_be_treated = slice_obj_list

        print 'N_turns', self.N_turns

        if pp.footprint_mode:
            self.recorded_particles = ParticleTrajectories(
                pp.n_macroparticles_for_footprint_track, self.N_turns)

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        self.bunch = sum(pieces_treated)

        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(self.bunch)

        # save results
        # print '%s Turn %d' % (time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(self.bunch)
        self.slice_monitor.dump(self.bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = self.bunch.extract_slices(self.slicer)

        # order reset of all clouds
        orders_to_pass = ['reset_clouds']

        if pp.footprint_mode:
            self.recorded_particles.dump(self.bunch)

        # check if simulation has to be stopped
        # 1. for beam losses
        if (not pp.footprint_mode and self.bunch.macroparticlenumber
                < pp.sim_stop_frac * pp.n_macroparticles):
            orders_to_pass.append('stop')
            self.SimSt.check_for_resubmit = False
            print 'Stop simulation due to beam losses.'

        # 2. for the emittance growth
        if pp.flag_check_emittance_growth:
            epsn_x_max = (pp.epsn_x) * (1 + pp.epsn_x_max_growth_fraction)
            epsn_y_max = (pp.epsn_y) * (1 + pp.epsn_y_max_growth_fraction)
            if not pp.footprint_mode and (self.bunch.epsn_x() > epsn_x_max
                                          or self.bunch.epsn_y() > epsn_y_max):
                orders_to_pass.append('stop')
                self.SimSt.check_for_resubmit = False
                print 'Stop simulation due to emittance growth.'

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        if pp.footprint_mode:
            # Tunes
            import NAFFlib
            print 'NAFFlib spectral analysis...'
            qx_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            qy_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            for ii in range(len(qx_i)):
                qx_i[ii] = NAFFlib.get_tune(
                    self.recorded_particles.x_i[ii]
                    + 1j * self.recorded_particles.xp_i[ii])
                qy_i[ii] = NAFFlib.get_tune(
                    self.recorded_particles.y_i[ii]
                    + 1j * self.recorded_particles.yp_i[ii])
            print 'NAFFlib spectral analysis done.'

            # Save
            import h5py
            dict_beam_status = {
                'x_init': np.squeeze(self.recorded_particles.x_i[:, 0]),
                'xp_init': np.squeeze(self.recorded_particles.xp_i[:, 0]),
                'y_init': np.squeeze(self.recorded_particles.y_i[:, 0]),
                'yp_init': np.squeeze(self.recorded_particles.yp_i[:, 0]),
                'z_init': np.squeeze(self.recorded_particles.z_i[:, 0]),
                'qx_i': qx_i,
                'qy_i': qy_i,
                'x_centroid': np.mean(self.recorded_particles.x_i, axis=1),
                'y_centroid': np.mean(self.recorded_particles.y_i, axis=1)}

            with h5py.File('footprint.h5', 'w') as fid:
                for kk in dict_beam_status.keys():
                    fid[kk] = dict_beam_status[kk]
        else:
            # save data for multi-job operation and launch new job
            import h5py
            with h5py.File('bunch_status_part%02d.h5'
                           % (self.SimSt.present_simulation_part), 'w') as fid:
                fid['bunch'] = self.piece_to_buffer(self.bunch)
            if not self.SimSt.first_run:
                os.system('rm bunch_status_part%02d.h5'
                          % (self.SimSt.present_simulation_part - 1))
            self.SimSt.after_simulation()

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
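# ---------------------------------------------------------------------------
# Minimal sketch of how the footprint output written by finalize_simulation()
# above can be inspected offline (plain h5py/matplotlib; the file name and
# keys come directly from the dictionary saved in the code):
# ---------------------------------------------------------------------------
import h5py
import matplotlib.pyplot as plt

with h5py.File('footprint.h5', 'r') as fid:
    qx_i = fid['qx_i'][:]  # per-particle horizontal tunes from NAFFlib
    qy_i = fid['qy_i'][:]  # per-particle vertical tunes

plt.plot(qx_i, qy_i, '.', markersize=2)
plt.xlabel('Q_x')
plt.ylabel('Q_y')
plt.show()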
def init_all(self):

    print('Exec init...')

    from LHC_custom import LHC
    self.machine = LHC(n_segments=n_segments,
                       machine_configuration=machine_configuration,
                       Qp_x=Qp_x, Qp_y=Qp_y,
                       octupole_knob=octupole_knob)

    self.n_non_parallelizable = 1  # RF

    inj_optics = self.machine.transverse_map.get_injection_optics()
    sigma_x_smooth = np.sqrt(inj_optics['beta_x'] * epsn_x / self.machine.betagamma)
    sigma_y_smooth = np.sqrt(inj_optics['beta_y'] * epsn_y / self.machine.betagamma)

    if flag_aperture:
        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=target_size_internal_grid_sigma * sigma_x_smooth,
            y_aper=target_size_internal_grid_sigma * sigma_y_smooth)
        self.machine.one_turn_map.append(apt_xy)
        self.n_non_parallelizable += 1

    if enable_transverse_damper:
        # setup transverse damper
        from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
        damper = TransverseDamper(dampingrate_x=dampingrate_x,
                                  dampingrate_y=dampingrate_y)
        self.machine.one_turn_map.append(damper)
        self.n_non_parallelizable += 1

    if enable_ecloud:
        print('Build ecloud...')
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(
            L_ecloud=L_ecloud_tot / n_segments,
            slicer=None,
            slice_by_slice_mode=True,
            Dt_ref=5e-12,
            pyecl_input_folder='./pyecloud_config',
            chamb_type='polyg',
            filename_chm='LHC_chm_ver.mat',
            # init_unif_edens_flag=1,
            # init_unif_edens=1e7,
            # N_mp_max=3000000,
            # nel_mp_ref_0=1e7/(0.7*3000000),
            # B_multip=[0.],
            # PyPICmode='ShortleyWeller_WithTelescopicGrids',
            # f_telescope=0.3,
            target_grid={
                'x_min_target': -target_size_internal_grid_sigma * sigma_x_smooth,
                'x_max_target': target_size_internal_grid_sigma * sigma_x_smooth,
                'y_min_target': -target_size_internal_grid_sigma * sigma_y_smooth,
                'y_max_target': target_size_internal_grid_sigma * sigma_y_smooth,
                'Dh_target': .2 * sigma_x_smooth},
            # N_nodes_discard=10.,
            # N_min_Dh_main=10,
            # x_beam_offset=x_beam_offset,
            # y_beam_offset=y_beam_offset,
            # probes_position=probes_position,
            save_pyecl_outp_as='cloud_evol_ring%d' % self.ring_of_CPUs.myring,
            save_only=['lam_t_array', 'nel_hist', 'Nel_timep',
                       't', 't_hist', 'xg_hist'],
            sparse_solver='PyKLU',
            enable_kick_x=enable_kick_x,
            enable_kick_y=enable_kick_y)
        print('Done.')

    # split the machine
    i_end_parallel = len(self.machine.one_turn_map) - self.n_non_parallelizable
    sharing = shs.ShareSegments(i_end_parallel,
                                self.ring_of_CPUs.N_nodes_per_ring)
    i_start_part, i_end_part = sharing.my_part(self.ring_of_CPUs.myid_in_ring)
    self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

    if self.ring_of_CPUs.I_am_at_end_ring:
        self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]

    # install eclouds in my part
    if enable_ecloud:
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()

                # we save buildup info only for the first cloud in each ring
                if (self.ring_of_CPUs.myid_in_ring > 0
                        or len(self.my_list_eclouds) > 0):
                    ecloud_new.remove_savers()

                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
            my_new_part.append(ele)
        self.mypart = my_new_part

    print('Hello, I am %d.%d, my part looks like: %s. Saver status: %s' % (
        self.ring_of_CPUs.myring,
        self.ring_of_CPUs.myid_in_ring,
        self.mypart,
        [(ec.cloudsim.cloud_list[0].pyeclsaver is not None)
         for ec in self.my_list_eclouds]))
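# ---------------------------------------------------------------------------
# The machine splitting above hands the first i_end_parallel elements of the
# one-turn map out to the nodes of a ring via shs.ShareSegments. A
# self-contained sketch of such an even partition (an illustration of the
# idea, not the actual PyPARIS ShareSegments implementation):
# ---------------------------------------------------------------------------
def my_part_sketch(n_elements, n_nodes, myid):
    # give each node n_elements // n_nodes elements and spread the
    # remainder over the first nodes
    base, rest = divmod(n_elements, n_nodes)
    i_start = myid * base + min(myid, rest)
    i_end = i_start + base + (1 if myid < rest else 0)
    return i_start, i_end

# e.g. 10 map elements over 4 nodes -> (0, 3), (3, 6), (6, 8), (8, 10)
print([my_part_sketch(10, 4, nn) for nn in range(4)])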
class Simulation(object):
    def __init__(self):
        self.N_turns = N_turns

    def init_all(self):

        self.n_slices = n_slices
        self.n_segments = n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=n_segments,
                           machine_configuration=machine_configuration)

        # define MP size
        nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / n_segments,
            slicer=None,
            Dt_ref=Dt_ref,
            pyecl_input_folder=pyecl_input_folder,
            chamb_type=chamb_type,
            x_aper=x_aper, y_aper=y_aper,
            filename_chm=filename_chm,
            Dh_sc=Dh_sc,
            init_unif_edens_flag=init_unif_edens_flag,
            init_unif_edens=init_unif_edens,
            N_mp_max=N_mp_max,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=B_multip_per_eV * self.machine.p0 / e * c)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=ecloud.impact_man.chamb.x_aper,
            y_aper=ecloud.impact_man.chamb.y_aper)
        self.machine.one_turn_map.append(apt_xy)

        n_non_parallelizable = 2  # rf and aperture

        # We assume that all objects that cannot be slice-parallelized
        # are at the end of the ring
        i_end_parallel = len(self.machine.one_turn_map) - n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print 'I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print 'I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart))

        # install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
        self.mypart = my_new_part

    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z)
        print 'Bunch initialized.'

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution', N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)
        pieces_to_be_treated = slice_obj_list

        print 'N_turns', self.N_turns

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        bunch = sum(pieces_treated)

        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(bunch)

        # save results
        # print '%s Turn %d' % (time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = bunch.extract_slices(self.slicer)

        orders_to_pass = ['reset_clouds']

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        pass

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
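# ---------------------------------------------------------------------------
# Sketch of a post-processing step for the monitor output written above,
# assuming the standard PyHEADTAIL BunchMonitor file layout (an .h5 file
# named after the monitor with a 'Bunch' group holding turn-by-turn arrays
# such as 'mean_x'); the group/dataset names are assumptions to verify
# against the installed PyHEADTAIL version.
# ---------------------------------------------------------------------------
import h5py
import matplotlib.pyplot as plt

with h5py.File('bunch_evolution.h5', 'r') as fid:
    mean_x = fid['Bunch']['mean_x'][:]  # horizontal centroid per turn

plt.plot(mean_x)
plt.xlabel('Turn')
plt.ylabel('Horizontal centroid [m]')
plt.show()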
import sys
sys.path.append('../../../../')

from scipy.constants import c as clight, e as qe

import matplotlib.pyplot as plt
import numpy as np

import PyECLOUD.PyEC4PyHT as PyEC4PyHT
from PyHEADTAIL.particles.slicing import UniformBinSlicer
import PyECLOUD.mystyle as ms

from LHC_custom import LHC

machine_configuration = 'HLLHC-injection'

machine = LHC(n_segments=1,
              machine_configuration=machine_configuration)

bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles=300000,
                                           intensity=1.15e11,
                                           epsn_x=2.5e-6,
                                           epsn_y=2.5e-6,
                                           sigma_z=0.11)

bunch.x[bunch.z < 5e-2] += 1e-3

ecloud_ele = PyEC4PyHT.Ecloud(
    slice_by_slice_mode=True,
    L_ecloud=1.,
    slicer=None,
    Dt_ref=25e-12,
    pyecl_input_folder='pyecloud_config',
)
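# ---------------------------------------------------------------------------
# The listing ends right after the Ecloud construction; a plausible
# continuation in slice-by-slice mode (a sketch, not the original script,
# with an assumed slicing of 64 slices over +/- 2 sigma_z) extracts the
# slices from the bunch and pushes each one through the cloud:
# ---------------------------------------------------------------------------
slicer = UniformBinSlicer(n_slices=64, n_sigma_z=2.)
slices_list = bunch.extract_slices(slicer)

for ss in slices_list:
    ecloud_ele.track(ss)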