Example #1
 def test_equality(self):
     '''Tests whether two slicers with the same config are equal
     in the sense of the == and != operator (calling __eq__, __ne__)
     '''
     unif_bin_slicer = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
     unif_bin_slicer2 = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
     self.assertTrue(
         unif_bin_slicer == unif_bin_slicer2,
         'comparing two uniform bin slicers with ' +
         'identical config using == returns False')
     self.assertFalse(
         unif_bin_slicer != unif_bin_slicer2,
         'comparing two uniform bin slicers with ' +
         'identical config using != returns True')
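
A minimal, self-contained sketch of the slicing API that the tests on this page exercise. The Particles import path and the use of scipy.constants for charge/mass are assumptions; the constructor call mirrors the one in Example #23 below.

import numpy as np
from scipy.constants import e, m_p  # assumed source of the charge/mass constants
from PyHEADTAIL.particles.particles import Particles  # assumed module path
from PyHEADTAIL.particles.slicing import UniformBinSlicer

n_mp = 100
z = np.linspace(-1., 1., num=n_mp)
coords_n_momenta_dict = {'x': np.copy(z), 'xp': np.zeros(n_mp),
                         'y': np.copy(z), 'yp': np.zeros(n_mp),
                         'z': z, 'dp': np.zeros(n_mp)}
bunch = Particles(n_mp, 1e9 / n_mp, e, m_p, 111., 20.1, coords_n_momenta_dict)

slicer = UniformBinSlicer(5, z_cuts=(-1., 1.))
slice_set = slicer.slice(bunch)
print(slice_set.n_macroparticles_per_slice)  # macroparticle count in each of the 5 bins
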
    def _add_wrapper_and_buncher(self):
        """Add longitudinal z wrapping around the circumference as
        well as a UniformBinSlicer for bunching the beam.
        """
        if self.longitudinal_mode is None:
            return

        elif self.longitudinal_mode == "linear":
            raise ValueError("Not implemented!!!!")

        elif self.longitudinal_mode == "non-linear":
            bucket = self.longitudinal_map.get_bucket(
                gamma=self.gamma, mass=self.mass, charge=self.charge
            )
            harmonic = bucket.h[0]
            bucket_length = self.circumference / harmonic
            z_beam_center = (
                bucket.z_ufp_separatrix + bucket_length - self.circumference / 2.0
            )
            self.z_wrapper = LongWrapper(
                circumference=self.circumference, z0=z_beam_center
            )
            self.one_turn_map.append(self.z_wrapper)
            self.buncher = UniformBinSlicer(
                harmonic, z_cuts=(self.z_wrapper.z_min, self.z_wrapper.z_max)
            )

        else:
            raise NotImplementedError("Something wrong with longitudinal_mode")
Example #3
    def init_master(self):

        # beam parameters
        epsn_x = 2.5e-6
        epsn_y = 3.5e-6
        sigma_z = 0.05
        intensity = 1e11
        macroparticlenumber_track = 50000

        # initialization bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            macroparticlenumber_track,
            intensity,
            epsn_x,
            epsn_y,
            sigma_z=sigma_z)
        print('Bunch initialized.')

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=self.n_slices,
                                       z_cuts=(-self.z_cut, self.z_cut))

        #slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        #prepare to save results
        self.beam_x, self.beam_y, self.beam_z = [], [], []
        self.sx, self.sy, self.sz = [], [], []
        self.epsx, self.epsy, self.epsz = [], [], []

        pieces_to_be_treated = slice_obj_list

        return pieces_to_be_treated
def generate_objects(machine,
                     n_macroparticles,
                     n_slices,
                     n_sigma_z,
                     intensity=1e11,
                     sigma_z=0.1124,
                     epsn_x=3.5e-6,
                     epsn_y=3.5e-6,
                     filling_scheme=[0],
                     matched=False):

    bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                               intensity,
                                               epsn_x,
                                               epsn_y,
                                               sigma_z=sigma_z,
                                               filling_scheme=filling_scheme,
                                               matched=matched)

    slicer = UniformBinSlicer(n_slices=n_slices,
                              n_sigma_z=n_sigma_z,
                              circumference=machine.circumference,
                              h_bunch=machine.h_bunch)

    return bunch, slicer, machine.transverse_map, machine.longitudinal_map
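
A possible call of the helper above, as a sketch: machine is assumed to be a PyHEADTAIL machine exposing generate_6D_Gaussian_bunch, circumference and h_bunch (like the LHC/Synchrotron objects built in the other examples); the remaining parameters keep their defaults.

bunch, slicer, trans_map, long_map = generate_objects(
    machine, n_macroparticles=5000, n_slices=50, n_sigma_z=3)
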
Example #5
 def test_sliceset_computations(self):
     '''
     macroparticles per slice, particles_within_cuts
     require a sliceset as a parameter
     Check that CPU/GPU functions yield the same result (if both exist)
     No complete tracking, only bare functions.
     '''
     fname = ['particles_within_cuts', 'macroparticles_per_slice']
     pm.update_active_dict(pm._CPU_numpy_func_dict)
     np.random.seed(0)
     n = 999
     b = self.create_gaussian_bunch(n)
     b.sort_for('z')
     slicer = UniformBinSlicer(n_slices=20, n_sigma_z=2)
     s_set = b.get_slices(slicer)
     z_cpu = b.z.copy()
     z_gpu = pycuda.gpuarray.to_gpu(z_cpu)
     sliceset_cpu = s_set
     sliceset_gpu = copy.deepcopy(s_set)
     sliceset_gpu.slice_index_of_particle = pycuda.gpuarray.to_gpu(
         s_set.slice_index_of_particle)
     params_cpu = [sliceset_cpu]
     params_gpu = [sliceset_gpu]
     for f in fname:
         res_cpu = pm._CPU_numpy_func_dict[f](*params_cpu)
         res_gpu = pm._GPU_func_dict[f](*params_gpu)
         self.assertTrue(
             np.allclose(res_cpu, res_gpu.get()),
             'CPU/GPU version of ' + f + ' dont yield the same result')
Example #6
    def test_wakefield_platesresonator(self):
        '''
        Track through a ParallelPlatesResonator wakefield
        '''
        Dx = np.append(np.linspace(0., 20., self.nsegments), [0])
        # add some dispersion/alpha
        lhc = m.LHC(n_segments=self.nsegments,
                    machine_configuration='450GeV',
                    app_x=1e-9,
                    app_y=2e-9,
                    app_xy=-1.5e-11,
                    chromaticity_on=False,
                    amplitude_detuning_on=True,
                    alpha_x=1.2 * np.ones(self.nsegments),
                    D_x=Dx,
                    printer=SilentPrinter())

        self.n_macroparticles = 200000
        bunch_cpu = self.create_lhc_bunch(lhc)  #self.create_gaussian_bunch()
        bunch_gpu = self.create_lhc_bunch(lhc)  #self.create_gaussian_bunch()
        n_slices = 50  #5
        frequency = 8e8  #1e9
        R_shunt = 23e3  # [Ohm]
        Q = 1.
        unif_bin_slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=1)
        #res = CircularResonator(R_shunt=R_shunt, frequency=frequency, Q=Q)
        res = ParallelPlatesResonator(R_shunt=R_shunt,
                                      frequency=frequency,
                                      Q=Q,
                                      printer=SilentPrinter())
        wake_field = WakeField(unif_bin_slicer, res)
        self.assertTrue(
            self._track_cpu_gpu([wake_field], bunch_cpu, bunch_gpu),
            'Tracking Wakefield CircularResonator CPU/GPU differs')
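
The same wake setup stripped of the CPU/GPU test harness, as a sketch: a slicer plus a resonator wrapped in a WakeField and appended to a one-turn map. The module path follows the wakes import used in Example #15; CircularResonator is used because its constructor appears there, while the test above uses ParallelPlatesResonator with the same keywords.

from PyHEADTAIL.particles.slicing import UniformBinSlicer
import PyHEADTAIL.impedances.wakes as wakes

slicer = UniformBinSlicer(n_slices=50, n_sigma_z=1)
resonator = wakes.CircularResonator(R_shunt=23e3, frequency=8e8, Q=1.)
wake_field = wakes.WakeField(slicer, resonator)
# machine.one_turn_map.append(wake_field)  # appended to the ring as in Example #15
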
Example #7
    def init_master(self, generate_bunch=True, prepare_monitors=True):

        pp = self.pp

        # Manage multi-job operation
        if pp.footprint_mode:
            if pp.N_turns != pp.N_turns_target:
                raise ValueError(
                    "In footprint mode you need to set N_turns_target=N_turns_per_run!"
                )
        self._setup_multijob_mode()

        # Define slicer
        self.slicer = UniformBinSlicer(n_slices=pp.n_slices,
                                       z_cuts=(-pp.z_cut, pp.z_cut))

        # Prepare monitors
        if prepare_monitors:
            self._prepare_monitors()

        # generate the bunch and slice for the first turn
        if generate_bunch:
            self._generate_bunch()
            slice_obj_list = self.bunch.extract_slices(self.slicer)
            pieces_to_be_treated = slice_obj_list
        else:
            pieces_to_be_treated = []

        print("N_turns", self.N_turns)

        if pp.footprint_mode:
            self.recorded_particles = ParticleTrajectories(
                pp.n_macroparticles_for_footprint_track, self.N_turns)

        return pieces_to_be_treated
Example #8
    def setUp(self):
        #beam parameters
        self.intensity = 1.234e9
        self.circumference = 111.
        self.gamma = 20.1

        #simulation parameters
        self.macroparticlenumber = 100
        #must be multiple of nslices
        self.particlenumber_per_mp = self.intensity / self.macroparticlenumber

        #create a bunch
        self.bunch = self.create_bunch()

        #create a params for slicers
        self.nslices = 5
        self.z_cuts = (-20., 30.)  #asymmetric to check uniform_charge_slicer
        self.n_sigma_z = 5
        self.basic_slicer = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        self.basic_slice_set = self.basic_slicer.slice(self.bunch)
Example #9
 def test_wakefield_wakefile(self):
     '''
     Track an LHC bunch and a LHC wakefield
     '''
     wakefile = 'autoruntests/wake_table.dat'  #'./wakeforhdtl_PyZbase_Allthemachine_450GeV_B1_LHC_inj_450GeV_B1.dat'
     Qp_x, Qp_y = 1., 1.
     Q_s = 0.0049
     n_macroparticles = 10
     intensity = 1e11
     longitudinal_focusing = 'linear'
     machine = m.LHC(n_segments=1,
                     machine_configuration='450GeV',
                     longitudinal_focusing=longitudinal_focusing,
                     Qp_x=[Qp_x],
                     Qp_y=[Qp_y],
                     Q_s=Q_s,
                     beta_x=[65.9756],
                     beta_y=[71.5255],
                     printer=SilentPrinter())
     epsn_x = 3.5e-6
     epsn_y = 3.5e-6
     sigma_z = 1.56e-9 * c / 4.
     np.random.seed(0)
     bunch_cpu = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                                    intensity,
                                                    epsn_x,
                                                    epsn_y,
                                                    sigma_z=sigma_z)
     np.random.seed(0)
     bunch_gpu = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                                    intensity,
                                                    epsn_x,
                                                    epsn_y,
                                                    sigma_z=sigma_z)
     n_slices_wakefields = 55
     n_sigma_z_wakefields = 3
     slicer_for_wakefields_cpu = UniformBinSlicer(
         n_slices_wakefields, n_sigma_z=n_sigma_z_wakefields)
     wake_components = [
         'time', 'dipole_x', 'dipole_y', 'no_quadrupole_x',
         'no_quadrupole_y', 'no_dipole_xy', 'no_dipole_yx'
     ]
     wake_table_cpu = WakeTable(wakefile,
                                wake_components,
                                printer=SilentPrinter())
     wake_field_cpu = WakeField(slicer_for_wakefields_cpu, wake_table_cpu)
     # also checked for 100 turns!
     self.assertTrue(
         self._track_cpu_gpu([wake_field_cpu],
                             bunch_cpu,
                             bunch_gpu,
                             nturns=2),
         'Tracking through WakeField(waketable) differs')
Example #10
 def test_z_cuts_warning(self):
     '''Tests whether a warning is raised whenever
     z_cut_tail >= z_cut_head
     '''
     inverse_z_cuts = (-0.1, -0.3)
     warnings = AccumulatorPrinter()
     slicer = UniformBinSlicer(self.nslices,
                               z_cuts=inverse_z_cuts,
                               warningprinter=warnings)
     self.assertTrue(
         len(warnings.log) > 0,
         'no warning generated when z_cut head < z_cut tail')
    def init_master(self):

        # beam parameters
        sigma_z = 0.2
        intensity = 1.15e11
        macroparticlenumber_track = 300000

        # initialization bunch
        bunch = self.machine.generate_6D_Gaussian_bunch(
            macroparticlenumber_track, intensity, epsn_x, epsn_y, sigma_z=sigma_z)
        print('Bunch initialized.')

        # load HEADTAIL particle coordinates from file
        self.n_part_per_turn = 5000
        appo = np.loadtxt(filename)

        parid = np.reshape(appo[:, 0], (-1, self.n_part_per_turn))[::self.n_segments, :]
        x = np.reshape(appo[:, 1], (-1, self.n_part_per_turn))[::self.n_segments, :]
        xp = np.reshape(appo[:, 2], (-1, self.n_part_per_turn))[::self.n_segments, :]
        y = np.reshape(appo[:, 3], (-1, self.n_part_per_turn))[::self.n_segments, :]
        yp = np.reshape(appo[:, 4], (-1, self.n_part_per_turn))[::self.n_segments, :]
        z = np.reshape(appo[:, 5], (-1, self.n_part_per_turn))[::self.n_segments, :]
        zp = np.reshape(appo[:, 6], (-1, self.n_part_per_turn))[::self.n_segments, :]

        # replace first particles with HEADTAIL ones
        bunch.x[:self.n_part_per_turn] = x[0, :]
        bunch.xp[:self.n_part_per_turn] = xp[0, :]
        bunch.y[:self.n_part_per_turn] = y[0, :]
        bunch.yp[:self.n_part_per_turn] = yp[0, :]
        bunch.z[:self.n_part_per_turn] = z[0, :]
        bunch.dp[:self.n_part_per_turn] = zp[0, :]

        # save id and momenta before track
        self.id_before = bunch.id[bunch.id <= self.n_part_per_turn]
        self.xp_before = bunch.xp[bunch.id <= self.n_part_per_turn]
        self.yp_before = bunch.yp[bunch.id <= self.n_part_per_turn]

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=self.n_slices, n_sigma_z=3.)

        self.rms_err_x_list = []
        self.rms_err_y_list = []

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        return pieces_to_be_treated
Example #12
 def _test_widebandfeedback(self):
     '''
     !!!!! Wideband feedback not ready yet! Skip test
     Track through a Kicker (class in widebandfeedback)
     '''
     bunch_cpu = self.create_all1_bunch()
     bunch_gpu = self.create_all1_bunch()
     slices = bunch_cpu.get_slices(UniformBinSlicer(n_slices=5,
                                                    n_sigma_z=0))
     pickup = Pickup(slices)
     kicker = Kicker(pickup)
     self.assertTrue(
         self._track_cpu_gpu([kicker], bunch_cpu, bunch_gpu),
         'Tracking widebandfeedback CPU/GPU differs. Check if reslicing ' +
         'needed and test is wrong!')
Example #13
 def test_per_slice_stats(self):
     '''
     All per_slice functions (mean, cov, ?emittance)
     Check that CPU/GPU functions yield the same result (if both exist)
     No complete tracking, only bare functions.
     '''
     fnames = ['mean_per_slice', 'std_per_slice']
     np.random.seed(0)
     n = 99999
     b = self.create_gaussian_bunch(n)
     b.sort_for('z')
     slicer = UniformBinSlicer(n_slices=777, n_sigma_z=None)
     s_set = b.get_slices(slicer)
     z_cpu = b.z.copy()
     z_gpu = pycuda.gpuarray.to_gpu(z_cpu)
     sliceset_cpu = s_set
     sliceset_gpu = copy.deepcopy(s_set)
     sliceset_gpu.slice_index_of_particle = pycuda.gpuarray.to_gpu(
         s_set.slice_index_of_particle)
     for fname in fnames:
         res_cpu = pm._CPU_numpy_func_dict[fname](sliceset_cpu, z_cpu)
         res_gpu = pm._GPU_func_dict[fname](sliceset_gpu, z_gpu)
         self.assertTrue(
             np.allclose(res_cpu, res_gpu.get()),
             'CPU/GPU version of ' + fname + ' dont yield the same result')
     fnames = ['emittance_per_slice']
     v_cpu = b.x
     v_gpu = pycuda.gpuarray.to_gpu(v_cpu)
     dp_cpu = z_cpu + np.arange(n) / n
     dp_gpu = pycuda.gpuarray.to_gpu(dp_cpu)
     for fname in fnames:
         res_cpu = pm._CPU_numpy_func_dict[fname](sliceset_cpu, z_cpu,
                                                  v_cpu, dp_cpu)
         res_gpu = pm._GPU_func_dict[fname](sliceset_gpu, z_gpu, v_gpu,
                                            dp_gpu)
         # only check things which aren't nan/None. Ignore RuntimeWarning!
         with np.errstate(invalid='ignore'):
             res_cpu = res_cpu[res_cpu > 1e-10]
             res_gpu = res_gpu.get()[res_gpu.get() > 1e-10]
         self.assertTrue(
             np.allclose(res_cpu, res_gpu),
             'CPU/GPU version of ' + fname + ' dont yield the same result')
    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z)
        print('Bunch initialized.')

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution',
                                          N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        #slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        return pieces_to_be_treated
Example #15
    def _install_impedance(self):

        pp = self.pp
        if hasattr(pp, 'enable_impedance'):
            if pp.enable_impedance:

                slicer_for_wakefields = UniformBinSlicer(pp.n_slices_wake,
                                                         z_cuts=(-pp.z_cut,
                                                                 pp.z_cut))

                import PyHEADTAIL.impedances.wakes as wakes
                wake = wakes.CircularResonator(
                    R_shunt=pp.resonator_R_shunt,
                    frequency=pp.resonator_frequency,
                    Q=pp.resonator_Q)
                wake_element = wakes.WakeField(slicer_for_wakefields, wake)
                self.machine.one_turn_map.append(wake_element)
                self.n_non_parallelizable += 1
                self.impedances = [wake_element]
            else:
                self.impedances = []
Example #16
    def setUp(self):
        #beam parameters
        self.intensity = 1.234e9
        self.circumference = 111.
        self.gamma = 20.1

        #simulation parameters
        self.macroparticlenumber = 100
        #must be multiple of nslices
        self.particlenumber_per_mp = self.intensity/self.macroparticlenumber

        #create a bunch
        self.bunch = self.create_bunch()

        #create a params for slicers
        self.nslices = 5
        self.z_cuts = (-20.,30.) #asymmetric to check uniform_charge_slicer
        self.n_sigma_z = 5
        self.basic_slicer = UniformBinSlicer(self.nslices,
                                             z_cuts=self.z_cuts)
        self.basic_slice_set = self.basic_slicer.slice(self.bunch)
Example #17
 def test_slicemonitor(self):
     '''Test the slicemonitor, especially the statistics per slice functions
     '''
     self.monitor_fn = 'monitor'
     self.n_macroparticles = 1000
     nslices = 3
     n_sigma_z = 1
     n_steps = 5
     slicer = UniformBinSlicer(nslices, n_sigma_z)
     slicemonitor_1 = SliceMonitor(self.monitor_fn + '1',
                                   n_steps,
                                   slicer,
                                   write_buffer_every=2,
                                   buffer_size=3)
     slicemonitor_2 = SliceMonitor(self.monitor_fn + '2',
                                   n_steps,
                                   slicer,
                                   write_buffer_every=2,
                                   buffer_size=3)
     bunch_cpu = self.create_gaussian_bunch()
     bunch_gpu = self.create_gaussian_bunch()
     self._monitor_cpu_gpu(slicemonitor_1, slicemonitor_2, bunch_cpu,
                           bunch_gpu)
Example #18
def slice_a_bunch(this_bunch, z_cut, n_slices):

    # Slice bunch if populated
    if this_bunch.slice_info['slice_4_EC']:
        bunch_center = this_bunch.slice_info['z_bin_center']
        this_slicer = UniformBinSlicer(z_cuts=(bunch_center - z_cut,
                                               bunch_center + z_cut),
                                       n_slices=n_slices)
        this_slices = this_bunch.extract_slices(this_slicer,
                                                include_non_sliced='always')

        sliced = this_slices[:-1]
        unsliced = this_slices[-1]
        if unsliced.slice_info != 'unsliced':
            raise ValueError("Something went wrong")

        for ss in sliced:
            ss.slice_info['interact_with_EC'] = True

        # Build head and tail slices
        mask_head = unsliced.z >= sliced[-1].slice_info['z_bin_right']
        mask_tail = unsliced.z <= sliced[0].slice_info['z_bin_left']


        slice_tail = Particles(macroparticlenumber=np.sum(mask_tail),
                            particlenumber_per_mp=unsliced.particlenumber_per_mp,
                            charge=unsliced.charge,
                            mass=unsliced.mass, circumference=unsliced.circumference,
                            gamma=unsliced.gamma,
                            coords_n_momenta_dict={\
                                'x': np.atleast_1d(unsliced.x[mask_tail]),
                                'xp':np.atleast_1d(unsliced.xp[mask_tail]),
                                'y':np.atleast_1d(unsliced.y[mask_tail]),
                                'yp':np.atleast_1d(unsliced.yp[mask_tail]),
                                'z':np.atleast_1d(unsliced.z[mask_tail]),
                                'dp':np.atleast_1d(unsliced.dp[mask_tail])})
        slice_tail.slice_info = {
            'z_bin_center':
            0.5 * (this_bunch.slice_info['z_bin_left'] +
                   sliced[0].slice_info['z_bin_left']),
            'z_bin_left':
            this_bunch.slice_info['z_bin_left'],
            'z_bin_right':
            sliced[0].slice_info['z_bin_left'],
            'interact_with_EC':
            False
        }


        slice_head = Particles(macroparticlenumber=np.sum(mask_head),
                            particlenumber_per_mp=unsliced.particlenumber_per_mp,
                            charge=unsliced.charge,
                            mass=unsliced.mass, circumference=unsliced.circumference,
                            gamma=unsliced.gamma,
                            coords_n_momenta_dict={\
                                'x': np.atleast_1d(unsliced.x[mask_head]),
                                'xp':np.atleast_1d(unsliced.xp[mask_head]),
                                'y':np.atleast_1d(unsliced.y[mask_head]),
                                'yp':np.atleast_1d(unsliced.yp[mask_head]),
                                'z':np.atleast_1d(unsliced.z[mask_head]),
                                'dp':np.atleast_1d(unsliced.dp[mask_head])})
        slice_head.slice_info = {
            'z_bin_center':
            0.5 * (this_bunch.slice_info['z_bin_right'] +
                   sliced[-1].slice_info['z_bin_right']),
            'z_bin_left':
            sliced[-1].slice_info['z_bin_right'],
            'z_bin_right':
            this_bunch.slice_info['z_bin_right'],
            'interact_with_EC':
            False
        }

        list_slices_this_bunch = [slice_tail] + sliced + [slice_head]

    else:
        # Build a copy of the bunch
        copy_this_bunch = Particles(
            macroparticlenumber=this_bunch.macroparticlenumber,
            particlenumber_per_mp=this_bunch.particlenumber_per_mp,
            charge=this_bunch.charge,
            mass=this_bunch.mass,
            circumference=this_bunch.circumference,
            gamma=this_bunch.gamma,
            coords_n_momenta_dict=this_bunch.get_coords_n_momenta_dict())
        copy_this_bunch.slice_info = {
            kk: this_bunch.slice_info[kk]
            for kk in list(this_bunch.slice_info.keys())
        }

        list_slices_this_bunch = [copy_this_bunch]

    for i_sl, ss in enumerate(
            list_slices_this_bunch[::-1]):  # I want slice 0 to be at the head
        ss.slice_info['info_parent_bunch'] = {
            kk: this_bunch.slice_info[kk]
            for kk in list(this_bunch.slice_info.keys())
        }
        ss.slice_info['i_slice'] = i_sl
        ss.slice_info['N_slices_tot_bunch'] = len(list_slices_this_bunch)

    return list_slices_this_bunch
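
slice_a_bunch relies on extract_slices(..., include_non_sliced='always'), which returns the per-slice Particles objects followed by one extra Particles object holding everything outside the z_cuts. A minimal sketch of that call; bunch is assumed to be any PyHEADTAIL bunch/Particles object already in scope.

from PyHEADTAIL.particles.slicing import UniformBinSlicer

slicer = UniformBinSlicer(n_slices=10, z_cuts=(-0.1, 0.1))
pieces = bunch.extract_slices(slicer, include_non_sliced='always')
sliced, unsliced = pieces[:-1], pieces[-1]  # same split as in slice_a_bunch above
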
Example #19
                                 beta_x[0], beta_y[0], beta_z, epsn_x, epsn_y,
                                 epsn_z)
xoffset = 0e-4
yoffset = 0e-4
bunch.x += xoffset
bunch.y += yoffset

afile = open('bunch', 'wb')
pickle.dump(bunch, afile)
afile.close()

# SLICER FOR WAKEFIELDS
# =====================
n_slices = 500  # 500
slicer_for_wakefields = UniformBinSlicer(
    n_slices,
    z_cuts=(-4. * sigma_z,
            4. * sigma_z))  #,circumference=circumference, h_bunch=h1)

# WAKEFIELD
# ==========
n_turns_wake = 1  # for the moment we consider that the wakefield decays after 1 turn
wakefile1 = (
    '/afs/cern.ch/work/n/natriant/private/pyheadtail_example_crabcavity/wakefields/SPS_complete_wake_model_2018_Q26.txt'
)
ww1 = WakeTable(
    wakefile1,
    ['time', 'dipole_x', 'dipole_y', 'quadrupole_x', 'quadrupole_y'],
    n_turns_wake=n_turns_wake)
# only dipolar kick
#my_length = len(ww1.wake_table['quadrupole_x'])
#ww1.wake_table['quadrupole_x'] = np.zeros(my_length)
Example #20
def gen_matched_multibunch_beam(machine, n_macroparticles_per_bunch,
                                filling_pattern, b_spac_s, bunch_intensity,
                                epsn_x, epsn_y, sigma_z,
                                non_linear_long_matching, min_inten_slice4EC):

    bucket_length_m = machine.circumference / (
        machine.longitudinal_map.harmonics[0])
    b_spac_m = b_spac_s * machine.beta * clight
    b_spac_buckets = np.round(b_spac_m / bucket_length_m)

    if np.abs(b_spac_buckets * bucket_length_m - b_spac_m) / b_spac_m > 0.03:
        raise ValueError(
            'Bunch spacing is not a multiple of the bucket length!')

    if non_linear_long_matching:
        generate_bunch = machine.generate_6D_Gaussian_bunch_matched
    else:
        generate_bunch = machine.generate_6D_Gaussian_bunch

    list_genbunches = []
    for i_slot, inten_slot in enumerate(filling_pattern):
        if inten_slot > 0:
            print(('Generating bunch at slot %d/%d' %
                   (i_slot, len(filling_pattern))))
            bunch = generate_bunch(n_macroparticles_per_bunch,
                                   inten_slot * bunch_intensity,
                                   epsn_x,
                                   epsn_y,
                                   sigma_z=sigma_z)
            bunch.z -= b_spac_buckets * bucket_length_m * i_slot
            list_genbunches.append(bunch)

    beam = sum(list_genbunches)

    bucket = machine.longitudinal_map.get_bucket(gamma=machine.gamma,
                                                 mass=machine.mass,
                                                 charge=machine.charge)
    # z_beam_center = bucket.z_ufp_separatrix + bucket_length - self.circumference/2.

    # Here the center of the bucket
    bucket.z_sfp

    # I want to re-separate the bunches
    buncher = UniformBinSlicer(
        n_slices=0,
        z_sample_points=np.arange(
            bucket.z_sfp -
            len(filling_pattern) * bucket_length_m * b_spac_buckets,
            bucket.z_sfp + bucket_length_m, bucket_length_m * b_spac_buckets))
    buncher_slice_set = beam.get_slices(buncher, statistics=True)
    list_bunches = beam.extract_slices(buncher, include_non_sliced='never')
    # The bunch at position 0 is the tail

    # If last bunch is empty remove it
    if (list_bunches[0].intensity < min_inten_slice4EC):
        list_bunches = list_bunches[1:]

    # Add further information to bunches
    for i_bb, bb in enumerate(
            list_bunches[::-1]):  # I want bunch 0 at the head of the train
        slice4EC = bb.intensity > min_inten_slice4EC
        bb.slice_info['slice_4_EC'] = slice4EC
        bb.slice_info['interact_with_EC'] = slice4EC
        bb.slice_info['N_bunches_tot_beam'] = len(list_bunches)
        bb.slice_info['i_bunch'] = i_bb
        bb.slice_info['i_turn'] = 0

    return list_bunches
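
Besides the n_slices/z_cuts and n_slices/n_sigma_z constructors used elsewhere on this page, the buncher above uses the z_sample_points interface, which prescribes the longitudinal sampling points from which the uniform bins are built (this reading follows the usage above, not independent documentation). A stripped-down sketch with arbitrary sample points:

import numpy as np
from PyHEADTAIL.particles.slicing import UniformBinSlicer

bucket_spacing = 2.5  # arbitrary, stands in for bucket_length_m * b_spac_buckets
buncher = UniformBinSlicer(
    n_slices=0,
    z_sample_points=np.arange(-10., 10. + bucket_spacing, bucket_spacing))
# slice_sets = beam.get_slices(buncher, statistics=True)  # as in the function above
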
bunch.y[:n_part_per_turn] = dict_HT['y0_HT']
bunch.yp[:n_part_per_turn] = dict_HT['yp0_HT']
bunch.z[:n_part_per_turn] = dict_HT['z0_HT']
bunch.dp[:n_part_per_turn] = dict_HT['dp0_HT']
n_turns = dict_HT['n_turns']


# define apertures and Dh_sc to simulate headtail conditions
x_aper = 20 * sigma_x
y_aper = 20 * sigma_y
Dh_sc = 2 * x_aper / 128 / 2.

# ecloud
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
from PyHEADTAIL.particles.slicing import UniformBinSlicer
slicer = UniformBinSlicer(n_slices=64, z_cuts=(-3 * bunch.sigma_z(), 3 * bunch.sigma_z()))


init_unif_edens_flag = 1
init_unif_edens = 1e11
N_MP_ele_init = 100000
N_mp_max = N_MP_ele_init * 4.

nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

new_one_turn_map = []
ecloud_list = []
for ele in machine.one_turn_map:
    new_one_turn_map.append(ele)
    if ele in machine.transverse_map:
        new_ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
    def init_all(self):

        self.n_slices = pp.n_slices

        # read the optics if needed
        if pp.optics_pickle_file is not None:
            with open(pp.optics_pickle_file) as fid:
                optics = pickle.load(fid)
                self.n_kick_smooth = np.sum(
                    ['_kick_smooth_' in nn for nn in optics['name']])
        else:
            optics = None
            self.n_kick_smooth = pp.n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=pp.n_segments,
                           machine_configuration=pp.machine_configuration,
                           beta_x=pp.beta_x,
                           beta_y=pp.beta_y,
                           accQ_x=pp.Q_x,
                           accQ_y=pp.Q_y,
                           Qp_x=pp.Qp_x,
                           Qp_y=pp.Qp_y,
                           octupole_knob=pp.octupole_knob,
                           optics_dict=optics,
                           V_RF=pp.V_RF)
        self.n_segments = self.machine.transverse_map.n_segments

        # compute sigma
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x_inj = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                              self.machine.betagamma)
        sigma_y_inj = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                              self.machine.betagamma)

        if pp.optics_pickle_file is None:
            sigma_x_smooth = sigma_x_inj
            sigma_y_smooth = sigma_y_inj
        else:
            beta_x_smooth = None
            beta_y_smooth = None
            for ele in self.machine.one_turn_map:
                if ele in self.machine.transverse_map:
                    if '_kick_smooth_' in ele.name1:
                        if beta_x_smooth is None:
                            beta_x_smooth = ele.beta_x1
                            beta_y_smooth = ele.beta_y1
                        else:
                            if beta_x_smooth != ele.beta_x1 or beta_y_smooth != ele.beta_y1:
                                raise ValueError(
                                    'Smooth kicks must have all the same beta')

            if beta_x_smooth is None:
                sigma_x_smooth = None
                sigma_y_smooth = None
            else:
                sigma_x_smooth = np.sqrt(beta_x_smooth * pp.epsn_x /
                                         self.machine.betagamma)
                sigma_y_smooth = np.sqrt(beta_y_smooth * pp.epsn_y /
                                         self.machine.betagamma)

        # define MP size
        nel_mp_ref_0 = pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper / pp.N_MP_ele_init_dip

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT

        if pp.custom_target_grid_arcs is not None:
            target_grid_arcs = pp.custom_target_grid_arcs
        else:
            target_grid_arcs = {
                'x_min_target':
                -pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'x_max_target':
                pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'y_min_target':
                -pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'y_max_target':
                pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_x_smooth
            }
        self.target_grid_arcs = target_grid_arcs

        if pp.enable_arc_dip:
            ecloud_dip = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_dip,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                init_unif_edens_flag=pp.init_unif_edens_flag_dip,
                init_unif_edens=pp.init_unif_edens_dip,
                N_mp_max=pp.N_mp_max_dip,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_dip,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if pp.enable_arc_quad:
            ecloud_quad = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_quad,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                N_mp_max=pp.N_mp_max_quad,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_quad,
                filename_init_MP_state=pp.filename_init_MP_state_quad,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
            with open('multigrid_config_dip.txt', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_dip.pkl', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
            with open('multigrid_config_quad.txt', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_quad.pkl', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=pp.target_size_internal_grid_sigma * sigma_x_inj,
            y_aper=pp.target_size_internal_grid_sigma * sigma_y_inj)
        self.machine.one_turn_map.append(apt_xy)

        if pp.enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=pp.dampingrate_x,
                                      dampingrate_y=pp.dampingrate_y)
            self.machine.one_turn_map.append(damper)

        # We assume that all the objects that cannot be slice-parallelized are at the end of the ring
        i_end_parallel = len(
            self.machine.one_turn_map) - pp.n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print('I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print('I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

        #install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                if pp.optics_pickle_file is None or '_kick_smooth_' in ele.name1:
                    if pp.enable_arc_dip:
                        ecloud_dip_new = ecloud_dip.generate_twin_ecloud_with_shared_space_charge(
                        )
                        my_new_part.append(ecloud_dip_new)
                        self.my_list_eclouds.append(ecloud_dip_new)
                    if pp.enable_arc_quad:
                        ecloud_quad_new = ecloud_quad.generate_twin_ecloud_with_shared_space_charge(
                        )
                        my_new_part.append(ecloud_quad_new)
                        self.my_list_eclouds.append(ecloud_quad_new)
                elif '_kick_element_' in ele.name1 and pp.enable_eclouds_at_kick_elements:

                    i_in_optics = list(optics['name']).index(ele.name1)
                    kick_name = optics['name'][i_in_optics]
                    element_name = kick_name.split('_kick_element_')[-1]
                    L_curr = optics['L_interaction'][i_in_optics]

                    buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                        '!!!NAME!!!', element_name)
                    chamber_fname = '%s_chamber.mat' % (element_name)

                    B_multip_curr = [0., optics['gradB'][i_in_optics]]

                    x_beam_offset = optics['x'][i_in_optics] * pp.orbit_factor
                    y_beam_offset = optics['y'][i_in_optics] * pp.orbit_factor

                    sigma_x_local = np.sqrt(optics['beta_x'][i_in_optics] *
                                            pp.epsn_x / self.machine.betagamma)
                    sigma_y_local = np.sqrt(optics['beta_y'][i_in_optics] *
                                            pp.epsn_y / self.machine.betagamma)

                    ecloud_ele = PyEC4PyHT.Ecloud(
                        slice_by_slice_mode=True,
                        L_ecloud=L_curr,
                        slicer=None,
                        Dt_ref=pp.Dt_ref,
                        pyecl_input_folder=pp.pyecl_input_folder,
                        chamb_type='polyg',
                        x_aper=None,
                        y_aper=None,
                        filename_chm=buildup_folder + '/' + chamber_fname,
                        PyPICmode=pp.PyPICmode,
                        Dh_sc=pp.Dh_sc_ext,
                        N_min_Dh_main=pp.N_min_Dh_main,
                        f_telescope=pp.f_telescope,
                        N_nodes_discard=pp.N_nodes_discard,
                        target_grid={
                            'x_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'x_max_target':
                            pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'y_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'y_max_target':
                            pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'Dh_target':
                            pp.target_Dh_internal_grid_sigma * sigma_y_local
                        },
                        N_mp_max=pp.N_mp_max_quad,
                        nel_mp_ref_0=nel_mp_ref_0,
                        B_multip=B_multip_curr,
                        filename_init_MP_state=buildup_folder + '/' +
                        pp.name_MP_state_file_kick_elements,
                        x_beam_offset=x_beam_offset,
                        y_beam_offset=y_beam_offset,
                        enable_kick_x=pp.enable_kick_x,
                        enable_kick_y=pp.enable_kick_y)

                    my_new_part.append(ecloud_ele)
                    self.my_list_eclouds.append(ecloud_ele)

        self.mypart = my_new_part

        if pp.footprint_mode:
            print('Proc. %d computing maps' % myid)
            # generate a bunch
            bunch_for_map = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_map,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)

            # Slice the bunch
            slicer_for_map = UniformBinSlicer(n_slices=pp.n_slices,
                                              z_cuts=(-pp.z_cut, pp.z_cut))
            slices_list_for_map = bunch_for_map.extract_slices(slicer_for_map)

            #Track the previous part of the machine
            for ele in self.machine.one_turn_map[:i_start_part]:
                for ss in slices_list_for_map:
                    ele.track(ss)

            # Measure optics, track and replace clouds with maps
            list_ele_type = []
            list_meas_beta_x = []
            list_meas_alpha_x = []
            list_meas_beta_y = []
            list_meas_alpha_y = []
            for ele in self.mypart:
                list_ele_type.append(str(type(ele)))
                # Measure optics
                bbb = sum(slices_list_for_map)
                list_meas_beta_x.append(bbb.beta_Twiss_x())
                list_meas_alpha_x.append(bbb.alpha_Twiss_x())
                list_meas_beta_y.append(bbb.beta_Twiss_y())
                list_meas_alpha_y.append(bbb.alpha_Twiss_y())

                if ele in self.my_list_eclouds:
                    ele.track_once_and_replace_with_recorded_field_map(
                        slices_list_for_map)
                else:
                    for ss in slices_list_for_map:
                        ele.track(ss)
            print('Proc. %d done with maps' % myid)

            with open('measured_optics_%d.pkl' % myid, 'wb') as fid:
                pickle.dump(
                    {
                        'ele_type': list_ele_type,
                        'beta_x': list_meas_beta_x,
                        'alpha_x': list_meas_alpha_x,
                        'beta_y': list_meas_beta_y,
                        'alpha_y': list_meas_alpha_y,
                    }, fid)

            #remove RF
            if self.ring_of_CPUs.I_am_the_master:
                self.non_parallel_part.remove(self.machine.longitudinal_map)
Example #23
class TestSlicing(unittest.TestCase):

    def setUp(self):
        #beam parameters
        self.intensity = 1.234e9
        self.circumference = 111.
        self.gamma = 20.1

        #simulation parameters
        self.macroparticlenumber = 100
        #must be multiple of nslices
        self.particlenumber_per_mp = self.intensity/self.macroparticlenumber

        #create a bunch
        self.bunch = self.create_bunch()

        #create a params for slicers
        self.nslices = 5
        self.z_cuts = (-20.,30.) #asymmetric to check uniform_charge_slicer
        self.n_sigma_z = 5
        self.basic_slicer = UniformBinSlicer(self.nslices,
                                             z_cuts=self.z_cuts)
        self.basic_slice_set = self.basic_slicer.slice(self.bunch)

    def tearDown(self):
        pass

    def test_long_cuts(self):
        '''Tests whether the z_cuts are initialized correctly'''
        (cut_tail, cut_head) = self.basic_slicer.get_long_cuts(self.bunch)
        self.assertAlmostEqual(self.z_cuts[0], cut_tail,
                               msg='get_long_cuts incorrect (tail cut)')
        self.assertAlmostEqual(self.z_cuts[1], cut_head,
                               msg='get_long_cuts incorrect (head cut)')

    def test_z_cuts_warning(self):
        '''Tests whether a warning is raised whenever
        z_cut_tail >= z_cut_head
        '''
        inverse_z_cuts = (-0.1,-0.3)
        warnings = AccumulatorPrinter()
        slicer = UniformBinSlicer(self.nslices, z_cuts = inverse_z_cuts,
                                  warningprinter=warnings)
        self.assertTrue(len(warnings.log) > 0,
                        'no warning generated when z_cut head < z_cut tail')

    def test_equality(self):
        '''Tests whether two slicers with the same config are equal
        in the sense of the == and != operator (calling __eq__, __ne__)
        '''
        unif_bin_slicer = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        unif_bin_slicer2 = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        self.assertTrue(unif_bin_slicer == unif_bin_slicer2,
                        'comparing two uniform bin slicers with '+
                        'identical config using == returns False')
        self.assertFalse(unif_bin_slicer != unif_bin_slicer2,
                         'comparing two uniform bin slicers with '+
                         'identical config using != returns True')

    def test_unif_charge_slicer(self):
        '''Tests whether the charges are equally distributed between
        the slices. Only works if nslices divides macroparticlenumber
        '''
        unif_charge_slicer = UniformChargeSlicer(n_slices=self.nslices,
                                                 z_cuts=self.z_cuts)
        slice_set = unif_charge_slicer.slice(self.bunch)
        p_per_slice = slice_set.n_macroparticles_per_slice
        if (self.macroparticlenumber % self.nslices == 0):
            self.assertTrue(check_elements_equal(p_per_slice),
                            'slices in UniformChargeSlicer don\'t have' +
                            'the same number of macroparticles in them')
        else:
            print('test_unif_charge_slicer() not run because ' +
                  'uniform charge distribution is impossible ' +
                  '(nparticles[' + str(self.macroparticlenumber) +
                  '] % nslices[' + str(self.nslices) + '] != 0)')

    def test_sliceset_macroparticles(self):
        '''Tests whether the sum of all particles per slice
        is equal to the specified number of macroparticles when specifying
        z_cuts which lie outside of the bunch
        '''
        #create a bunch and a slice set encompassing the whole bunch
        z_min, z_max = -2., 2.
        bunch = self.create_bunch(zmin=z_min, zmax=z_max)
        z_cuts = (z_min-1,z_max+1)
        slice_set = UniformChargeSlicer(n_slices=self.nslices,
                                        z_cuts=z_cuts).slice(bunch)
        n_particles = sum(slice_set.n_macroparticles_per_slice)
        self.assertEqual(self.macroparticlenumber, n_particles,
                         'the SliceSet lost/added some particles')

    def test_add_statistics(self):
        """ Tests whether any error gets thrown when calling the statistics
        functions of the slicer. Does not do any specific tests
        """
        self.basic_slicer.add_statistics(self.basic_slice_set, self.bunch, True)
        self.basic_slice_set.mean_x
        self.basic_slice_set.eff_epsn_y

    def test_emittance_no_dispersion(self):
        """ Tests whether the effective emittance and emittance are the same
        for a beam with no dispersion effects
        """
        bunch = self.create_bunch_with_params(1, 42, 0., 20)
        slice_set = self.basic_slicer.slice(bunch)
        self.basic_slicer.add_statistics(slice_set, bunch, True)
        for n in range(self.nslices):
            self.assertAlmostEqual(slice_set.epsn_x[n],
                                   slice_set.eff_epsn_x[n],
                                   places=4,
                                   msg='The effective emittance is not the ' +
                                   'same as the emittance for no dispersion')


    # exclude this test for now, fails at the moment but not clear whether
    # this should be changed
    #def test_sliceset_dimensions(self):
    #    '''Tests whether the dimensions of several slice_set properties
    #    match the specified number of slices
    #    '''
    #    self.assertTrue(self.basic_slice_set.slice_widths.size ==
    #                    self.nslices, 'slice_widths has wrong dimension')
    #    #print(self.basic_slice_set.slice_positions)
    #    self.assertTrue(self.basic_slice_set.slice_positions.size ==
    #                    self.nslices, 'slice_positions has wrong dimension')

    def create_bunch(self, zmin=-1., zmax=1.):
        z = np.linspace(zmin, zmax, num=self.macroparticlenumber)
        y = np.copy(z)
        x = np.copy(z)
        xp = np.linspace(-0.5, 0.5, num=self.macroparticlenumber)
        yp = np.copy(xp)
        dp = np.copy(xp)
        coords_n_momenta_dict = {
            'x': x, 'y': y, 'z': z,
            'xp': xp, 'yp': yp, 'dp': dp
        }
        return Particles(
            self.macroparticlenumber, self.particlenumber_per_mp, e, m_p,
            self.circumference, self.gamma, coords_n_momenta_dict
        )

    def create_bunch_with_params(self,alpha_x, beta_x, disp_x, gamma):
        np.random.seed(0)
        beta_y = beta_x
        alpha_y = alpha_x
        disp_y = disp_x
        alpha0= [0.00308]
        C = 6911.
        Qs = 0.017
        epsn_x = 3.75e-6
        epsn_y = 3.75e-6
        linear_map = LinearMap(alpha0, Qs, C)
       # then transform...
        intensity = 1.05e11
        sigma_z = 0.23
        gamma_t = 1. / np.sqrt(linear_map.alpha_array[0])
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = np.abs((linear_map.eta(dp=0, gamma=gamma) * linear_map.circumference /
                  (2 * np.pi * linear_map.Qs)))

        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (beta_z * e)
        #print ('epsn_z: ' + str(epsn_z))
        bunch = generate_Gaussian6DTwiss(
            macroparticlenumber=10000, intensity=intensity, charge=e,
            gamma=gamma, mass=m_p, circumference=linear_map.circumference,
            alpha_x=0., beta_x=1., epsn_x=epsn_x,
            alpha_y=0., beta_y=1., epsn_y=epsn_y,
            beta_z=beta_z, epsn_z=epsn_z)
        # Scale to correct beta and alpha
        xx = bunch.x.copy()
        yy = bunch.y.copy()
        bunch.x *= np.sqrt(beta_x)
        bunch.xp = -alpha_x/np.sqrt(beta_x) * xx + 1./np.sqrt(beta_x) * bunch.xp
        bunch.y *= np.sqrt(beta_y)
        bunch.yp = -alpha_y/np.sqrt(beta_y) * yy + 1./np.sqrt(beta_y) * bunch.yp
        bunch.x += disp_x * bunch.dp
        bunch.y += disp_y * bunch.dp
        return bunch
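
The TestSlicing class above is an ordinary unittest.TestCase, so running the file directly with the standard entry point is enough (assuming the module-level imports and helpers it references, e.g. check_elements_equal, AccumulatorPrinter and generate_Gaussian6DTwiss, are defined in the same file):

if __name__ == '__main__':
    unittest.main()
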
def run(intensity, chroma=0, i_oct=0):
    '''Arguments:
        - intensity: integer number of charges in beam
        - chroma: first-order chromaticity Q'_{x,y}, identical
          for both transverse planes
        - i_oct: octupole current in A (positive i_oct means
          LOF = i_oct > 0 and LOD = -i_oct < 0)
    '''

    # BEAM AND MACHINE PARAMETERS
    # ============================
    from LHC import LHC
    # energy set above will enter get_nonlinear_params p0
    assert machine_configuration == 'LHC-injection'
    machine = LHC(n_segments=1,
                  machine_configuration=machine_configuration,
                  **get_nonlinear_params(chroma=chroma,
                                         i_oct=i_oct,
                                         p0=0.45e12 * e / c))

    # BEAM
    # ====
    epsn_x = 3.e-6  # normalised horizontal emittance
    epsn_y = 3.e-6  # normalised vertical emittance
    sigma_z = 1.2e-9 * machine.beta * c / 4.  # RMS bunch length in meters

    bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                               intensity,
                                               epsn_x,
                                               epsn_y,
                                               sigma_z=sigma_z,
                                               matched=True)

    print("\n--> Bunch length and emittance: {:g} m, {:g} eVs.".format(
        bunch.sigma_z(), bunch.epsn_z()))

    # CREATE BEAM SLICERS
    # ===================
    slicer_for_slicemonitor = UniformBinSlicer(50,
                                               z_cuts=(-3 * sigma_z,
                                                       3 * sigma_z))
    slicer_for_wakefields = UniformBinSlicer(
        50,
        z_cuts=(-3 * sigma_z, 3 * sigma_z),
        circumference=machine.circumference,
        h_bunch=machine.h_bunch)
    print("Slice")

    # CREATE WAKES
    # ============
    wake_table1 = WakeTable(
        wakefile,
        [
            'time',
            'dipole_x',
            'dipole_y',
            # 'quadrupole_x', 'quadrupole_y',
            'noquadrupole_x',
            'noquadrupole_y',
            # 'dipole_xy', 'dipole_yx',
            'nodipole_xy',
            'nodipole_yx',
        ])
    wake_field = WakeField(slicer_for_wakefields, wake_table1, mpi=True)

    # CREATE DAMPER
    # =============
    dampingtime = 50.
    gain = 2. / dampingtime
    damper = IdealBunchFeedback(gain)

    # CREATE MONITORS
    # ===============
    try:
        bucket = machine.longitudinal_map.get_bucket(bunch)
    except AttributeError:
        bucket = machine.rfbucket

    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': machine.Q_x,
        'Qy': machine.Q_y,
        'Qs': bucket.Q_s,
        'beta_x': bunch.beta_Twiss_x(),
        'beta_y': bunch.beta_Twiss_y(),
        'beta_z': bucket.beta_z,
        'epsn_x': bunch.epsn_x(),
        'epsn_y': bunch.epsn_y(),
        'sigma_z': bunch.sigma_z(),
    }
    bunchmonitor = BunchMonitor(
        outputpath + '/bunchmonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns,
        simulation_parameters_dict,
        write_buffer_to_file_every=512,
        buffer_size=4096)
    slicemonitor = SliceMonitor(
        outputpath + '/slicemonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns_slicemon,
        slicer_for_slicemonitor,
        simulation_parameters_dict,
        write_buffer_to_file_every=1,
        buffer_size=n_turns_slicemon)

    # TRACKING LOOP
    # =============
    # machine.one_turn_map.append(damper)
    machine.one_turn_map.append(wake_field)

    # for slice statistics monitoring:
    s_cnt = 0
    monitorswitch = False

    print('\n--> Begin tracking...\n')

    # GO!!!
    for i in range(n_turns):

        t0 = time.clock()

        # track the beam around the machine for one turn:
        machine.track(bunch)

        ex, ey, ez = bunch.epsn_x(), bunch.epsn_y(), bunch.epsn_z()
        mx, my, mz = bunch.mean_x(), bunch.mean_y(), bunch.mean_z()

        # monitor the bunch statistics (once per turn):
        bunchmonitor.dump(bunch)

        # if the centroid becomes unstable (>1cm motion)
        # then monitor the slice statistics:
        if not monitorswitch:
            if mx > 1e-2 or my > 1e-2 or i > n_turns - n_turns_slicemon:
                print("--> Activate slice monitor")
                monitorswitch = True
        else:
            if s_cnt < n_turns_slicemon:
                slicemonitor.dump(bunch)
                s_cnt += 1

        # stop the tracking as soon as any bunch statistic becomes non-finite:
        if not all(np.isfinite(c) for c in [ex, ey, ez, mx, my, mz]):
            print('*** STOPPING SIMULATION: non-finite bunch stats!')
            break

        # print status every 100 turns:
        if i % 100 == 0:
            t1 = time.clock()
            print('Emittances: ({:.3g}, {:.3g}, {:.3g}) '
                  '& Centroids: ({:.3g}, {:.3g}, {:.3g})'
                  '@ turn {:d}, {:g} ms, {:s}'.format(
                      ex, ey, ez, mx, my, mz, i, (t1 - t0) * 1e3,
                      time.strftime("%d/%m/%Y %H:%M:%S", time.localtime())))

    print('\n*** Successfully completed!')
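
To inspect the outcome of a run like the one above, the bunch-monitor file can be read back with h5py. Below is a minimal sketch, assuming the monitor appends an '.h5' suffix to the file name used above and stores per-turn datasets such as 'mean_x' in a 'Bunch' group (outputpath, it and chroma are the same variables as in the script; check fid.keys() if the layout differs):

import h5py
import numpy as np

fname = outputpath + '/bunchmonitor_{:04d}_chroma={:g}.h5'.format(it, chroma)
with h5py.File(fname, 'r') as fid:
    # horizontal centroid per recorded turn (assumed group/dataset names)
    mean_x = np.array(fid['Bunch']['mean_x'])
print('Peak |<x>| over the run: {:.3e} m'.format(np.nanmax(np.abs(mean_x))))
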
Beispiel #25
0
def run(job_id, accQ_y, accQ_x, phase_filter_x, phase_filter_y, damping_time,
        total_filter_delay, optics_file, phase_x_col, beta_x_col, phase_y_col,
        beta_y_col, list_of_systems):
    # Main simulation file

    #    job_id = 0
    chroma = 0
    intensity = 1e11
    gain = 2. / damping_time
    charge = e
    mass = m_p

    alpha = 5.034**-2

    p0 = 300e6 * e / c
    Q_s = 0.02
    circumference = 160.
    s = None
    alpha_x = None
    alpha_y = None
    beta_x = circumference / (2. * np.pi * accQ_x)
    beta_y = circumference / (2. * np.pi * accQ_y)
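    # smooth-approximation average beta functions, beta = C / (2 pi Q)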
    D_x = 0
    D_y = 0
    optics_mode = 'smooth'
    name = None
    n_segments = 1

    # detunings
    Qp_x = chroma
    Qp_y = chroma

    app_x = 0
    app_y = 0
    app_xy = 0

    longitudinal_mode = 'linear'

    #    h_bunch = h_RF
    h_RF = 2
    h_bunch = 2
    wrap_z = False

    machine = Synchrotron(optics_mode=optics_mode,
                          circumference=circumference,
                          n_segments=n_segments,
                          s=s,
                          name=name,
                          alpha_x=alpha_x,
                          beta_x=beta_x,
                          D_x=D_x,
                          alpha_y=alpha_y,
                          beta_y=beta_y,
                          D_y=D_y,
                          accQ_x=accQ_x,
                          accQ_y=accQ_y,
                          Qp_x=Qp_x,
                          Qp_y=Qp_y,
                          app_x=app_x,
                          app_y=app_y,
                          app_xy=app_xy,
                          alpha_mom_compaction=alpha,
                          longitudinal_mode=longitudinal_mode,
                          h_RF=np.atleast_1d(h_RF),
                          p0=p0,
                          charge=charge,
                          mass=mass,
                          wrap_z=wrap_z,
                          Q_s=Q_s)

    beta_x = machine.transverse_map.beta_x[0]
    beta_y = machine.transverse_map.beta_y[0]

    # BEAM
    # ====
    epsn_x = 300e-6
    epsn_y = 300e-6
    sigma_z = 450e-9 * c * machine.beta
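    # i.e. a 450 ns RMS bunch length in time, converted to metres via beta*c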
    bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                               intensity,
                                               epsn_x,
                                               epsn_y,
                                               sigma_z=sigma_z)
    init_offset = 1e-2
    bunch.x = bunch.x + init_offset
    bunch.y = bunch.y + init_offset

    # CREATE BEAM SLICERS
    # ===================
    slicer_for_wakefields = UniformBinSlicer(50,
                                             z_cuts=(-4 * sigma_z,
                                                     4 * sigma_z))

    additional_filter_delay = total_filter_delay - 1

    Q_x = accQ_x
    Q_y = accQ_y

    pyHT_obj_list = []
    map_element_list = []

    gain_divider = 1.

    for i, system in enumerate(list_of_systems):
        object_locations = find_object_locations(system, optics_file,
                                                 phase_x_col, beta_x_col,
                                                 phase_y_col, beta_y_col)

        plane = system[0][1]
        for j in range(len(system) - 1):
            if plane == 'x':
                phase_advance = object_locations[0][1] - object_locations[j +
                                                                          1][1]
            elif plane == 'y':
                phase_advance = object_locations[0][3] - object_locations[j +
                                                                          1][3]
            else:
                raise ValueError('Unknown plane')

            if phase_advance >= 0.:
                delay = additional_filter_delay + 1
            else:
                delay = additional_filter_delay

            phase_advance = phase_advance + 0.25

            kicker_processors = [Bypass()]

            if plane == 'x':

                pickup_processors = [
                    ChargeWeighter(normalization='segment_average'),
                    Average(avg_type='total'),
                    Register(8, Q_x, delay)
                ]
                kicker_registers = [pickup_processors[-1]]
                combiner = (FIRCombiner(phase_filter_x,
                                        kicker_registers,
                                        0.,
                                        beta_x,
                                        beta_conversion='90_deg'), None)
                pickup = PickUp(slicer_for_wakefields, pickup_processors, None,
                                object_locations[j + 1][1], beta_x,
                                object_locations[j + 1][3], beta_y)
                kicker = Kicker(gain / gain_divider,
                                slicer_for_wakefields,
                                kicker_processors,
                                None,
                                kicker_registers,
                                None,
                                object_locations[0][1],
                                beta_x,
                                object_locations[0][3],
                                beta_y,
                                combiner=combiner)

            elif plane == 'y':
                pickup_processors = [
                    ChargeWeighter(normalization='segment_average'),
                    Average(avg_type='total'),
                    Register(8, Q_y, delay)
                ]
                kicker_registers = [pickup_processors[-1]]
                combiner = (None,
                            FIRCombiner(phase_filter_y,
                                        kicker_registers,
                                        0.,
                                        beta_y,
                                        beta_conversion='90_deg'))
                pickup = PickUp(slicer_for_wakefields, None, pickup_processors,
                                object_locations[j + 1][1], beta_x,
                                object_locations[j + 1][3], beta_y)
                kicker = Kicker(gain / gain_divider,
                                slicer_for_wakefields,
                                None,
                                kicker_processors,
                                None,
                                kicker_registers,
                                object_locations[0][1],
                                beta_x,
                                object_locations[0][3],
                                beta_y,
                                combiner=combiner)

            pyHT_obj_list.append(pickup)
            pyHT_obj_list.append(kicker)
            map_element_list.append(system[j + 1])
            map_element_list.append(system[0])

    new_map = generate_one_turn_map(machine.one_turn_map,
                                    map_element_list,
                                    pyHT_obj_list,
                                    optics_file,
                                    phase_x_col,
                                    beta_x_col,
                                    phase_y_col,
                                    beta_y_col,
                                    machine.circumference,
                                    alpha_x=None,
                                    beta_x=beta_x,
                                    D_x=0.,
                                    alpha_y=None,
                                    beta_y=beta_y,
                                    D_y=0.,
                                    accQ_x=accQ_x,
                                    accQ_y=accQ_y,
                                    Qp_x=chroma,
                                    Qp_y=chroma,
                                    app_x=0.,
                                    app_y=0.,
                                    app_xy=0.,
                                    other_detuners=[],
                                    use_cython=False)
    machine.one_turn_map = new_map

    output_data = np.zeros((n_turns, 3))
    for i in range(n_turns):
        t0 = time.clock()
        output_data[i, 0] = i
        output_data[i, 1] = bunch.mean_x()
        output_data[i, 2] = bunch.mean_y()

        machine.track(bunch)
        if (np.abs(bunch.mean_x()) > 4 * init_offset) or (np.abs(
                bunch.mean_y()) > 4 * init_offset):
            output_data = output_data[:(i + 1), :]
            break

        if i % 100 != 0:
            continue

        print(
            '{:4d} \t {:+3e} \t {:+3e} \t {:+3e} \t {:3e} \t {:3e} \t {:3f} \t {:3f} \t {:3f} \t {:3s}'
            .format(i, bunch.mean_x(), bunch.mean_y(), bunch.mean_z(),
                    bunch.epsn_x(), bunch.epsn_y(), bunch.epsn_z(),
                    bunch.sigma_z(), bunch.sigma_dp(), str(time.clock() - t0)))

    print('\n*** Successfully completed!')

    return output_data
    def init_master(self):

        # Manage multi-job operation
        if pp.footprint_mode:
            if pp.N_turns != pp.N_turns_target:
                raise ValueError(
                    'In footprint mode you need to set N_turns_target=N_turns_per_run!'
                )

        import PyPARIS_sim_class.Save_Load_Status as SLS
        SimSt = SLS.SimulationStatus(N_turns_per_run=pp.N_turns,
                                     check_for_resubmit=True,
                                     N_turns_target=pp.N_turns_target)
        SimSt.before_simulation()
        self.SimSt = SimSt

        # generate a bunch
        if pp.footprint_mode:
            self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_track,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)
        elif SimSt.first_run:

            if pp.bunch_from_file is not None:
                print 'Loading bunch from file %s ...' % pp.bunch_from_file
                with h5py.File(pp.bunch_from_file, 'r') as fid:
                    self.bunch = self.buffer_to_piece(
                        np.array(fid['bunch']).copy())
                print 'Bunch loaded from file.\n'

            else:
                self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                    n_macroparticles=pp.n_macroparticles,
                    intensity=pp.intensity,
                    epsn_x=pp.epsn_x,
                    epsn_y=pp.epsn_y,
                    sigma_z=pp.sigma_z)

                # compute initial displacements
                inj_opt = self.machine.transverse_map.get_injection_optics()
                sigma_x = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                                  self.machine.betagamma)
                sigma_y = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                                  self.machine.betagamma)
                x_kick = pp.x_kick_in_sigmas * sigma_x
                y_kick = pp.y_kick_in_sigmas * sigma_y

                # apply initial displacement
                if not pp.footprint_mode:
                    self.bunch.x += x_kick
                    self.bunch.y += y_kick

                print 'Bunch initialized.'
        else:
            print 'Loading bunch from file...'
            with h5py.File(
                    'bunch_status_part%02d.h5' %
                (SimSt.present_simulation_part - 1), 'r') as fid:
                self.bunch = self.buffer_to_piece(
                    np.array(fid['bunch']).copy())
            print 'Bunch loaded from file.'

        # initial slicing
        self.slicer = UniformBinSlicer(n_slices=pp.n_slices,
                                       z_cuts=(-pp.z_cut, pp.z_cut))

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        # define a slice monitor
        from PyHEADTAIL.monitors.monitors import SliceMonitor
        self.slice_monitor = SliceMonitor(
            'slice_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns,
            self.slicer, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        #slice for the first turn
        slice_obj_list = self.bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print 'N_turns', self.N_turns

        if pp.footprint_mode:
            self.recorded_particles = ParticleTrajectories(
                pp.n_macroparticles_for_footprint_track, self.N_turns)

        return pieces_to_be_treated
Beispiel #27
0
class TestSlicing(unittest.TestCase):
    def setUp(self):
        #beam parameters
        self.intensity = 1.234e9
        self.circumference = 111.
        self.gamma = 20.1

        #simulation parameters
        self.macroparticlenumber = 100
        #must be multiple of nslices
        self.particlenumber_per_mp = self.intensity / self.macroparticlenumber

        #create a bunch
        self.bunch = self.create_bunch()

        #create a params for slicers
        self.nslices = 5
        self.z_cuts = (-20., 30.)  #asymmetric to check uniform_charge_slicer
        self.n_sigma_z = 5
        self.basic_slicer = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        self.basic_slice_set = self.basic_slicer.slice(self.bunch)

    def tearDown(self):
        pass

    def test_long_cuts(self):
        '''Tests whether the z_cuts are initialized correctly'''
        (cut_tail, cut_head) = self.basic_slicer.get_long_cuts(self.bunch)
        self.assertAlmostEqual(self.z_cuts[0], cut_tail,
                               msg='get_long_cuts incorrect (tail cut)')
        self.assertAlmostEqual(self.z_cuts[1], cut_head,
                               msg='get_long_cuts incorrect (head cut)')

    def test_z_cuts_warning(self):
        '''Tests whether a warning is raised whenever
        z_cut_tail >= z_cut_head
        '''
        inverse_z_cuts = (-0.1, -0.3)
        warnings = AccumulatorPrinter()
        slicer = UniformBinSlicer(self.nslices,
                                  z_cuts=inverse_z_cuts,
                                  warningprinter=warnings)
        self.assertTrue(
            len(warnings.log) > 0,
            'no warning generated when z_cut head < z_cut tail')

    def test_equality(self):
        '''Tests whether two slicers with the same config are equal
        in the sense of the == and != operator (calling __eq__, __ne__)
        '''
        unif_bin_slicer = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        unif_bin_slicer2 = UniformBinSlicer(self.nslices, z_cuts=self.z_cuts)
        self.assertTrue(
            unif_bin_slicer == unif_bin_slicer2,
            'comparing two uniform bin slicers with ' +
            'identical config using == returns False')
        self.assertFalse(
            unif_bin_slicer != unif_bin_slicer2,
            'comparing two uniform bin slicers with ' +
            'identical config using != returns True')

    def test_unif_charge_slicer(self):
        '''Tests whether the charge is equally distributed between
        the slices. Only works if nslices divides macroparticlenumber
        '''
        unif_charge_slicer = UniformChargeSlicer(n_slices=self.nslices,
                                                 z_cuts=self.z_cuts)
        slice_set = unif_charge_slicer.slice(self.bunch)
        p_per_slice = slice_set.n_macroparticles_per_slice
        if (self.macroparticlenumber % self.nslices == 0):
            self.assertTrue(
                check_elements_equal(p_per_slice),
                'slices in UniformChargeSlicer don\'t have ' +
                'the same number of macroparticles in them')
        else:
            print('test_unif_charge_slicer() not run because ' +
                  'uniform charge distribution is impossible ' +
                  '(nparticles[' + str(self.macroparticlenumber) +
                  '] % nslices[' + str(self.nslices) + '] != 0)')

    def test_sliceset_macroparticles(self):
        '''Tests whether the sum of all particles per slice
        is equal to the specified number of macroparticles when specifying
        z_cuts which lie outside of the bunch
        '''
        #create a bunch and a slice set encompassing the whole bunch
        z_min, z_max = -2., 2.
        bunch = self.create_bunch(zmin=z_min, zmax=z_max)
        z_cuts = (z_min - 1, z_max + 1)
        slice_set = UniformChargeSlicer(n_slices=self.nslices,
                                        z_cuts=z_cuts).slice(bunch)
        n_particles = sum(slice_set.n_macroparticles_per_slice)
        self.assertEqual(self.macroparticlenumber, n_particles,
                         'the SliceSet lost/added some particles')

    def test_add_statistics(self):
        """ Tests whether any error gets thrown when calling the statistics
        functions of the slicer. Does not do any specific tests
        """
        self.basic_slicer.add_statistics(self.basic_slice_set, self.bunch,
                                         True)
        self.basic_slice_set.mean_x
        self.basic_slice_set.eff_epsn_y

    def test_emittance_no_dispersion(self):
        """ Tests whether the effective emittance and emittance are the same
        for a beam with no dispersion effects
        """
        bunch = self.create_bunch_with_params(1, 42, 0., 20)
        slice_set = self.basic_slicer.slice(bunch)
        self.basic_slicer.add_statistics(slice_set, bunch, True)
        for n in xrange(self.nslices):
            self.assertAlmostEqual(slice_set.epsn_x[n],
                                   slice_set.eff_epsn_x[n],
                                   places=4,
                                   msg='The effective emittance is not the ' +
                                   'same as the emittance for no dispersion')

    # exclude this test for now, fails at the moment but not clear whether
    # this should be changed
    #def test_sliceset_dimensions(self):
    #    '''Tests whether the dimensions of several slice_set properties
    #    match the specified number of slices
    #    '''
    #    self.assertTrue(self.basic_slice_set.slice_widths.size ==
    #                    self.nslices, 'slice_widths has wrong dimension')
    #    #print(self.basic_slice_set.slice_positions)
    #    self.assertTrue(self.basic_slice_set.slice_positions.size ==
    #                    self.nslices, 'slice_positions has wrong dimension')

    def create_bunch(self, zmin=-1., zmax=1.):
        z = np.linspace(zmin, zmax, num=self.macroparticlenumber)
        y = np.copy(z)
        x = np.copy(z)
        xp = np.linspace(-0.5, 0.5, num=self.macroparticlenumber)
        yp = np.copy(xp)
        dp = np.copy(xp)
        coords_n_momenta_dict = {
            'x': x,
            'y': y,
            'z': z,
            'xp': xp,
            'yp': yp,
            'dp': dp
        }
        return Particles(self.macroparticlenumber, self.particlenumber_per_mp,
                         e, m_p, self.circumference, self.gamma,
                         coords_n_momenta_dict)

    def create_bunch_with_params(self, alpha_x, beta_x, disp_x, gamma):
        np.random.seed(0)
        beta_y = beta_x
        alpha_y = alpha_x
        disp_y = disp_x
        alpha0 = [0.00308]
        C = 6911.
        Q_s = 0.017
        epsn_x = 3.75e-6
        epsn_y = 3.75e-6
        linear_map = LinearMap(alpha0, Q_s, C)
        # then transform...
        intensity = 1.05e11
        sigma_z = 0.23
        gamma_t = 1. / np.sqrt(linear_map.alpha_array[0])
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = np.abs(
            (linear_map.eta(dp=0, gamma=gamma) * linear_map.circumference /
             (2 * np.pi * linear_map.Q_s)))

        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (beta_z * e)
        #print ('epsn_z: ' + str(epsn_z))
        bunch = generate_Gaussian6DTwiss(
            macroparticlenumber=10000,
            intensity=intensity,
            charge=e,
            gamma=gamma,
            mass=m_p,
            circumference=linear_map.circumference,
            alpha_x=0.,
            beta_x=1.,
            epsn_x=epsn_x,
            alpha_y=0.,
            beta_y=1.,
            epsn_y=epsn_y,
            beta_z=beta_z,
            epsn_z=epsn_z)
        # Scale to correct beta and alpha
        xx = bunch.x.copy()
        yy = bunch.y.copy()
        bunch.x *= np.sqrt(beta_x)
        bunch.xp = -alpha_x / np.sqrt(beta_x) * xx + 1. / np.sqrt(
            beta_x) * bunch.xp
        bunch.y *= np.sqrt(beta_y)
        bunch.yp = -alpha_y / np.sqrt(beta_y) * yy + 1. / np.sqrt(
            beta_y) * bunch.yp
        bunch.x += disp_x * bunch.dp
        bunch.y += disp_y * bunch.dp
        return bunch
sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / machine.betagamma)
sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / machine.betagamma)

x_kick = x_kick_in_sigmas * sigma_x
y_kick = y_kick_in_sigmas * sigma_y

# define PIC grid size
Dh_sc = .2e-3

# define MP size
nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init
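# i.e. electrons per macroparticle: the initial uniform electron density times
# the approximate chamber cross-section (2*x_aper by 2*y_aper), divided by the
# initial number of electron macroparticles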

# define an electron cloud
import PyECLOUD.PyEC4PyHT as PyEC4PyHT
from PyHEADTAIL.particles.slicing import UniformBinSlicer
slicer = UniformBinSlicer(n_slices=64, n_sigma_z=2.)
ecloud = PyEC4PyHT.Ecloud(L_ecloud=machine.circumference / n_segments,
                          slicer=slicer,
                          Dt_ref=10e-12,
                          pyecl_input_folder='./pyecloud_config',
                          chamb_type=chamb_type,
                          x_aper=x_aper,
                          y_aper=y_aper,
                          filename_chm=filename_chm,
                          Dh_sc=Dh_sc,
                          init_unif_edens_flag=init_unif_edens_flag,
                          init_unif_edens=init_unif_edens,
                          N_mp_max=N_mp_max,
                          nel_mp_ref_0=nel_mp_ref_0,
                          B_multip=B_multip_per_eV * machine.p0 / e * c)
Beispiel #29
0
def run():
    #HELPERS
    def test_particle_indices_of_slice(bunch, slice_set):
        '''Get particle_indices_of_slice for specific slice index. Apply
        'inverse function' slice_index_of_particle to get back slice_index
        if everything works correctly.
        '''
        all_pass = True
        for i in xrange(slice_set.n_slices):
            pix_slice = slice_set.particle_indices_of_slice(i)
            six_pix = slice_set.slice_index_of_particle[pix_slice]
            if (six_pix != i).any():
                all_pass = False
        if not all_pass:
            print(
                '  Particle_indices_of_slice and slice_index_of_particle FAILED'
            )

    def slice_check_statistics(slice_set):
        '''Test if statistics functions are executable. No value
        checking
        '''
        slice_set.mean_x
        slice_set.sigma_x
        slice_set.epsn_x
        slice_set.mean_y
        slice_set.sigma_y
        slice_set.epsn_y
        slice_set.mean_z
        slice_set.sigma_z
        slice_set.epsn_z
        slice_set.mean_xp
        slice_set.mean_yp
        slice_set.mean_dp
        slice_set.sigma_dp

    def call_slice_set_attributes(bunch, slice_set, line_density_testing=True):
        # Call all the properties / attributes / methods.
        slice_set.z_cut_head
        slice_set.z_cut_tail
        slice_set.z_centers
        slice_set.n_slices
        slice_set.slice_widths
        slice_set.slice_positions
        slice_set.n_macroparticles_per_slice
        slice_set.particles_within_cuts
        slice_set.particle_indices_by_slice

        # if line_density_testing:
        #     slice_set.line_density_derivative_gauss()
        #     slice_set.line_density_derivative()

    def call_slicer_attributes():
        pass

    def clean_bunch(bunch):
        bunch.clean_slices()


# In[4]:
# Basic parameters.

    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2. * np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = [0.0003225]

    # In[5]:

    # general simulation parameters
    n_particles = 10000

    # machine parameters
    circumference = 157.
    inj_alpha_x = 0
    inj_alpha_y = 0
    inj_beta_x = 5.9  # in [m]
    inj_beta_y = 5.7  # in [m]
    Qx = 5.1
    Qy = 6.1
    gamma_tr = 4.05
    alpha_c_array = [gamma_tr**-2]
    V_rf = 8e3  # in [V]
    harmonic = 1
    phi_offset = 0  # measured from aligned focussing phase (0 or pi)
    pipe_radius = 5e-2

    # beam parameters
    Ekin = 1.4e9  # in [eV]
    intensity = 1.684e12
    epsn_x = 2.5e-6  # in [m*rad]
    epsn_y = 2.5e-6  # in [m*rad]
    epsn_z = 1.2  # 4pi*sig_z*sig_dp (*p0/e) in [eVs]

    # calculations
    gamma = 1 + ee * Ekin / (m_p * c**2)
    beta = np.sqrt(1 - gamma**-2)
    eta = alpha_c_array[0] - gamma**-2
    if eta < 0:
        phi_offset = np.pi - phi_offset
    Etot = gamma * m_p * c**2 / ee
    p0 = np.sqrt(gamma**2 - 1) * m_p * c
    Q_s = np.sqrt(np.abs(eta) * V_rf / (2 * np.pi * beta**2 * Etot))
    beta_z = np.abs(eta) * circumference / (2 * np.pi * Q_s)
    turn_period = circumference / (beta * c)

    bunch = generators.generate_Gaussian6DTwiss(  # implicitly tests Gaussian and Gaussian2DTwiss as well
        n_particles,
        intensity,
        ee,
        m_p,
        circumference,
        gamma=gamma,
        alpha_x=inj_alpha_x,
        beta_x=inj_beta_x,
        epsn_x=epsn_x,
        alpha_y=inj_alpha_y,
        beta_y=inj_beta_y,
        epsn_y=epsn_y,
        beta_z=beta_z,
        epsn_z=epsn_z)

    # In[6]:

    # Uniform bin slicer
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    # Request slice_set from bunch with the uniform_bin_slicer config.
    bunch._slice_sets
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer,
                                             statistics=True)
    bunch._slice_sets

    uniform_bin_slicer.config
    call_slice_set_attributes(bunch, uniform_bin_slice_set)
    #call_slicer_attributes(uniform_bin_slice_set)

    # Let bunch remove the slice_set.
    bunch.clean_slices()
    bunch._slice_sets

    # In[7]:

    # Uniform charge slicer
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)
    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer,
                                                statistics=True)
    uniform_charge_slice_set.mode
    uniform_charge_slicer.config
    call_slice_set_attributes(bunch,
                              uniform_charge_slice_set,
                              line_density_testing=False)

    try:
        call_slice_set_attributes(bunch,
                                  uniform_charge_slice_set,
                                  line_density_testing=True)
    except ModeIsNotUniformBin as e:
        pass

    # In[8]:

    # Other cases. When are slicers equal?
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)

    # In[9]:

    # Other cases. When are slicers equal?
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)
    uniform_bin_slicer_2 = UniformBinSlicer(n_slices, n_sigma_z)

    # In[10]:

    # Does bunch slice_set management work?
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer)
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer)

    bunch.clean_slices()

    # In[11]:

    # Old method update_slices should give RuntimeError.
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)

    # In[12]:

    # beam parameters attached to SliceSet?
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    slicer = UniformBinSlicer(n_slices, n_sigma_z)
    slices = bunch.get_slices(slicer)

    beam_parameters = slicer.extract_beam_parameters(bunch)

    for p_name, p_value in beam_parameters.iteritems():
        pass

    # In[14]:

    # CASE I
    # UniformBinSlicer, no longitudinal cut.
    n_slices = 10
    n_sigma_z = None
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets

    # In[15]:

    # CASE II
    # UniformBinSlicer, n_sigma_z = 1
    n_slices = 10
    n_sigma_z = 1
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets

    # In[16]:

    # CASE II b.
    # UniformBinSlicer, set z_cuts
    n_slices = 10
    z_cuts = (-0.05, 0.15)
    uniform_bin_slicer = UniformBinSlicer(n_slices, z_cuts=z_cuts)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets

    # In[18]:

    # CASE III
    # UniformChargeSlicer, no longitudinal cut.
    n_slices = 10
    n_sigma_z = None
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_charge_slicer config.
    bunch.get_slices(uniform_charge_slicer)
    bunch._slice_sets
Beispiel #30
0
def run(job_id, accQ_y):
    it = job_id

    # SIMULATION PARAMETERS
    # =====================

    # Simulation parameters
    n_turns = 10000
    n_macroparticles = 100000  # per bunch

    # MACHINE PARAMETERS
    # ==================

    intensity = 1e13  # protons
    #    intensity = 2*4e12 # protons
    E0 = 71e6  # Kinetic energy [eV]
    p0 = np.sqrt((m_p_MeV + E0)**2 - m_p_MeV**2) * e / c

    print('Beam kinetic energy: ' + str(E0 * 1e-6) + ' MeV')
    print('Beam momentum: ' + str(p0 * 1e-6 * c / e) + ' MeV/c')

    accQ_x = 4.31  # Horizontal tune
    #    accQ_y = 3.80 # Vertical tune is an input argument

    chroma = -1.4  # Chromaticity

    alpha = 5.034**-2  # momentum compaction factor

    circumference = 160.  # [meters]

    # Approximated average beta functions (lumped wake normalizations)
    beta_x = circumference / (2. * np.pi * accQ_x)
    beta_y = circumference / (2. * np.pi * accQ_y)

    # Harmonic number for RF
    h_RF = [2]  # list of RF harmonic numbers
    V_RF = [5e3 * 2]  # a list of RF voltages
    p_increment = 0.
    dphi_RF = [np.pi]  # a list of RF phases
    #    dphi_RF = 0.

    # the non-linear longitudinal mode includes the RF system; the linear mode instead needs the synchrotron tune Q_s
    longitudinal_mode = 'non-linear'
    #    Q_s=0.02 # Longitudinal tune

    optics_mode = 'smooth'
    n_segments = 1
    s = None
    alpha_x = None
    alpha_y = None
    D_x = 0
    D_y = 0
    charge = e
    mass = m_p
    name = None
    app_x = 0
    app_y = 0
    app_xy = 0

    # Create the PyHEADTAIL Synchrotron object
    machine = Synchrotron(optics_mode=optics_mode,
                          circumference=circumference,
                          n_segments=n_segments,
                          s=s,
                          name=name,
                          alpha_x=alpha_x,
                          beta_x=beta_x,
                          D_x=D_x,
                          alpha_y=alpha_y,
                          beta_y=beta_y,
                          D_y=D_y,
                          accQ_x=accQ_x,
                          accQ_y=accQ_y,
                          Qp_x=chroma,
                          Qp_y=chroma,
                          app_x=app_x,
                          app_y=app_y,
                          app_xy=app_xy,
                          alpha_mom_compaction=alpha,
                          longitudinal_mode=longitudinal_mode,
                          h_RF=np.atleast_1d(h_RF),
                          V_RF=np.atleast_1d(V_RF),
                          dphi_RF=np.atleast_1d(dphi_RF),
                          p0=p0,
                          p_increment=p_increment,
                          charge=charge,
                          mass=mass)

    print('')
    print('machine.beta: ')
    print(machine.beta)
    print('')

    print('')
    print('machine.gamma: ')
    print(machine.gamma)
    print('')

    epsn_x = 300e-6
    epsn_y = 300e-6
    sigma_z = 15  # bunch length in meters to be matched to the bucket

    # Creates transverse macroparticle distribution
    allbunches = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                                    intensity, epsn_x, epsn_y,
                                                    sigma_z)

    # Creates longitudinal macroparticle distribution
    rfb = RFBucket(circumference, machine.gamma, m_p, e, [alpha], 0., h_RF,
                   V_RF, dphi_RF)
    rfb_matcher = RFBucketMatcher(rfb, WaterbagDistribution, sigma_z=sigma_z)

    rfb_matcher.integrationmethod = 'cumtrapz'

    z, dp, _, _ = rfb_matcher.generate(n_macroparticles)
    np.copyto(allbunches.z, z)
    np.copyto(allbunches.dp, dp)

    # Slicer object, which is used for wake fields and slice monitors
    slicer = UniformBinSlicer(75, z_cuts=(-2. * sigma_z, 2. * sigma_z))

    # WAKE FIELDS
    # ===========

    # Length of the wake function in turns
    n_turns_wake = 150

    # Parameters for a resonator
    # frequency is in units of (mode - Q_frac), where
    #       mode: integer coupled-bunch mode number (1 matches the observations)
    #       Q_frac: resonance fractional tune

    f_r = (1 - 0.83) * 1. / (circumference / (c * machine.beta))
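    # i.e. f_r = (mode - Q_frac) * f_rev, with f_rev = beta * c / circumference
    # the revolution frequency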
    Q = 15
    R = 1.0e6

    # Resonator wake object, which is added to the one-turn map
    wakes = CircularResonator(R, f_r, Q, n_turns_wake=n_turns_wake)
    wake_field = WakeField(slicer, wakes)
    machine.one_turn_map.append(wake_field)

    # CREATE MONITORS
    # ===============
    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': accQ_x,
        'Qy': accQ_y,
        # 'Qs': Q_s,
        'beta_x': beta_x,
        'beta_y': beta_y,
        # 'beta_z': bucket.beta_z,
        'epsn_x': epsn_x,
        'epsn_y': epsn_y,
        'sigma_z': sigma_z,
    }
    # Bunch monitor stores bunch-averaged positions for all the bunches
    bunchmonitor = BunchMonitor(outputpath + '/bunchmonitor_{:04d}'.format(it),
                                n_turns,
                                simulation_parameters_dict,
                                write_buffer_every=32,
                                buffer_size=32)

    # Slice monitor saves slice-by-slice data for each bunch
    slicemonitor = SliceMonitor(outputpath +
                                '/slicemonitor_{:01d}_{:04d}'.format(0, it),
                                60,
                                slicer,
                                simulation_parameters_dict,
                                write_buffer_every=60,
                                buffer_size=60)

    # Counter for the number of turns stored by the slice monitor
    s_cnt = 0

    # TRACKING LOOP
    # =============
    monitor_active = False
    print('\n--> Begin tracking...\n')

    for i in range(n_turns):
        t0 = time.clock()

        # Track the beam through the one-turn map
        machine.track(allbunches)

        # Stores bunch mean coordinate values
        bunchmonitor.dump(allbunches)

        # If the oscillation amplitude of the bunches exceeds the threshold,
        # or the simulation is in its last turns, trigger the slice
        # monitor to record head-tail motion data
        if (allbunches.mean_x() > 1e-1 or allbunches.mean_y() > 1e-1 or i >
            (n_turns - 64)):
            monitor_active = True

        # saves slice monitor data if monitors are activated and less than
        # 64 turns have been stored
        if monitor_active and s_cnt < 64:
            slicemonitor.dump(allbunches)
            s_cnt += 1
        elif s_cnt == 64:
            break

        # Print the current bunch coordinates and emittances every 100 turns
        if (i % 100 == 0):
            print(
                '{:4d} \t {:+3e} \t {:+3e} \t {:+3e} \t {:3e} \t {:3e} \t {:3f} \t {:3f} \t {:3f} \t {:3s}'
                .format(i, allbunches.mean_x(), allbunches.mean_y(),
                        allbunches.mean_z(), allbunches.epsn_x(),
                        allbunches.epsn_y(), allbunches.epsn_z(),
                        allbunches.sigma_z(), allbunches.sigma_dp(),
                        str(time.clock() - t0)))
Beispiel #31
0
def run():
    #HELPERS
    def test_particle_indices_of_slice(bunch, slice_set):
        '''Get particle_indices_of_slice for specific slice index. Apply
        'inverse function' slice_index_of_particle to get back slice_index
        if everything works correctly.
        '''
        all_pass = True
        for i in xrange(slice_set.n_slices):
            pix_slice = slice_set.particle_indices_of_slice(i)
            six_pix = slice_set.slice_index_of_particle[pix_slice]
            if (six_pix != i).any():
                all_pass = False
        if not all_pass:
            print ('  Particle_indices_of_slice and slice_index_of_particle FAILED')

    def slice_check_statistics(slice_set):
        '''Test if statistics functions are executable. No value
        checking
        '''
        slice_set.mean_x
        slice_set.sigma_x
        slice_set.epsn_x
        slice_set.mean_y
        slice_set.sigma_y
        slice_set.epsn_y
        slice_set.mean_z
        slice_set.sigma_z
        slice_set.epsn_z
        slice_set.mean_xp
        slice_set.mean_yp
        slice_set.mean_dp
        slice_set.sigma_dp


    def call_slice_set_attributes(bunch, slice_set, line_density_testing=True):
        # Call all the properties / attributes / methods.
        slice_set.z_cut_head
        slice_set.z_cut_tail
        slice_set.z_centers
        slice_set.n_slices
        slice_set.slice_widths
        slice_set.slice_positions
        slice_set.n_macroparticles_per_slice
        slice_set.particles_within_cuts
        slice_set.particle_indices_by_slice


        # if line_density_testing:
        #     slice_set.line_density_derivative_gauss()
        #     slice_set.line_density_derivative()


    def call_slicer_attributes():
        pass

    def clean_bunch(bunch):
        bunch.clean_slices()


# In[4]:
    # Basic parameters.
    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2.*np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = [0.0003225]


    # In[5]:

    # general simulation parameters
    n_particles = 10000

    # machine parameters
    circumference = 157.
    inj_alpha_x = 0
    inj_alpha_y = 0
    inj_beta_x = 5.9 # in [m]
    inj_beta_y = 5.7 # in [m]
    Qx = 5.1
    Qy = 6.1
    gamma_tr = 4.05
    alpha_c_array = [gamma_tr**-2]
    V_rf = 8e3 # in [V]
    harmonic = 1
    phi_offset = 0 # measured from aligned focussing phase (0 or pi)
    pipe_radius = 5e-2

    # beam parameters
    Ekin = 1.4e9 # in [eV]
    intensity = 1.684e12
    epsn_x = 2.5e-6 # in [m*rad]
    epsn_y = 2.5e-6 # in [m*rad]
    epsn_z = 1.2 # 4pi*sig_z*sig_dp (*p0/e) in [eVs]

    # calculations
    gamma = 1 + ee * Ekin / (m_p * c**2)
    beta = np.sqrt(1 - gamma**-2)
    eta = alpha_c_array[0] - gamma**-2
    if eta < 0:
        phi_offset = np.pi - phi_offset
    Etot = gamma * m_p * c**2 / ee
    p0 = np.sqrt(gamma**2 - 1) * m_p * c
    Qs = np.sqrt(np.abs(eta) * V_rf / (2 * np.pi * beta**2 * Etot))
    beta_z = np.abs(eta) * circumference / (2 * np.pi * Qs)
    turn_period = circumference / (beta * c)

    bunch = generators.generate_Gaussian6DTwiss( # implicitly tests Gaussian and Gaussian2DTwiss as well
        n_particles, intensity, ee, m_p, circumference, gamma=gamma,
        alpha_x=inj_alpha_x, beta_x=inj_beta_x, epsn_x=epsn_x,
        alpha_y=inj_alpha_y, beta_y=inj_beta_y, epsn_y=epsn_y,
        beta_z=beta_z, epsn_z=epsn_z
        )



    # In[6]:

    # Uniform bin slicer
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    # Request slice_set from bunch with the uniform_bin_slicer config.
    bunch._slice_sets
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer, statistics=True)
    bunch._slice_sets

    uniform_bin_slicer.config
    call_slice_set_attributes(bunch, uniform_bin_slice_set)
    #call_slicer_attributes(uniform_bin_slice_set)

    # Let bunch remove the slice_set.
    bunch.clean_slices()
    bunch._slice_sets


    # In[7]:

    # Uniform charge slicer
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)
    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer, statistics=True)
    uniform_charge_slice_set.mode
    uniform_charge_slicer.config
    call_slice_set_attributes(bunch, uniform_charge_slice_set, line_density_testing=False)

    try:
        call_slice_set_attributes(bunch, uniform_charge_slice_set, line_density_testing=True)
    except ModeIsNotUniformBin as e:
        pass



    # In[8]:

    # Other cases. When are slicers equal?
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)


    # In[9]:

    # Other cases. When are slicers equal?
    n_slices = 10
    n_sigma_z = 2
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)
    uniform_bin_slicer_2 = UniformBinSlicer(n_slices, n_sigma_z)


    # In[10]:

    # Does bunch slice_set management work?
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer)
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    uniform_charge_slice_set = bunch.get_slices(uniform_charge_slicer)

    bunch.clean_slices()


    # In[11]:

    # Old method update_slices should give RuntimeError.
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)


    # In[12]:

    # beam parameters attached to SliceSet?
    n_slices = 10
    n_sigma_z = 2

    clean_bunch(bunch)

    slicer = UniformBinSlicer(n_slices, n_sigma_z)
    slices = bunch.get_slices(slicer)

    beam_parameters = slicer.extract_beam_parameters(bunch)

    for p_name, p_value in beam_parameters.iteritems():
        pass

    # In[14]:

    # CASE I
    # UniformBinSlicer, no longitudinal cut.
    n_slices = 10
    n_sigma_z = None
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets



    # In[15]:

    # CASE II
    # UniformBinSlicer, n_sigma_z = 1
    n_slices = 10
    n_sigma_z = 1
    uniform_bin_slicer = UniformBinSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets



    # In[16]:

    # CASE II b.
    # UniformBinSlicer, set z_cuts
    n_slices = 10
    z_cuts = (-0.05, 0.15)
    uniform_bin_slicer = UniformBinSlicer(n_slices, z_cuts=z_cuts)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_bin_slicer config.
    uniform_bin_slice_set = bunch.get_slices(uniform_bin_slicer)
    bunch._slice_sets




    # In[18]:

    # CASE III
    # UniformChargeSlicer, no longitudinal cut.
    n_slices = 10
    n_sigma_z = None
    uniform_charge_slicer = UniformChargeSlicer(n_slices, n_sigma_z)

    clean_bunch(bunch)

    bunch._slice_sets

    # Request slice_set from bunch with the uniform_charge_slicer config.
    bunch.get_slices(uniform_charge_slicer)
    bunch._slice_sets
n_r = 3*200
N_max = 29
n_tail_cut = 0
include_detuning_with_long_amplitude = True
r_b = 4*sigma_z
a_param = 8./r_b**2
lambda_param = 1

pool_size = 0 # N cores (0 for serial)

###################
# Build impedance # 
###################

# Slicer
slicer_for_wakefields = UniformBinSlicer(
                        n_slices_wake, z_cuts=(-z_cut, z_cut))

# Dipolar wake
wake_dipolar = wakes.Resonator(R_shunt=resonator_R_shunt,
        frequency=resonator_frequency,
        Q=resonator_Q,
        Yokoya_X1=Yokoya_X1,
        Yokoya_Y1=0.,
        Yokoya_X2=0.,
        Yokoya_Y2=0.,
        switch_Z=0)
wake_dipolar_element = wakes.WakeField(slicer_for_wakefields, wake_dipolar)

# Quadrupolar wake
wake_quadrupolar = wakes.Resonator(R_shunt=resonator_R_shunt,
        frequency=resonator_frequency,
Beispiel #33
0
# Turn slices into buffer
list_buffers = []
list_bunch_buffers = []
for bb in list_bunches:
    these_slices = st.slice_a_bunch(bb, z_cut=z_cut, n_slices=n_slices)
    list_bunch_buffers.append([])
    for ss in these_slices:
        thisbuffer = ch.beam_2_buffer(ss,verbose=True, mode='pickle')
        list_buffers.append(thisbuffer)
        list_bunch_buffers[-1].append(thisbuffer)

big_buffer = ch.combine_float_buffers(list_buffers)


# Build profile of the full beam
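# (z_cuts presumably cover the whole train: len(filling_pattern) bunches spaced
#  by b_spac_buckets buckets behind z = 0, plus one bucket length ahead)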
thin_slicer = UniformBinSlicer(n_slices=10000, z_cuts=(-len(filling_pattern)*bucket_length_m*b_spac_buckets, bucket_length_m))
thin_slice_set = beam.get_slices(thin_slicer, statistics=True)

import matplotlib.pyplot as plt
plt.close('all')

ms.mystyle_arial(fontsz=14, dist_tick_lab=5)



# re-split buffer
list_buffers_rec = ch.split_float_buffers(big_buffer)

# Plot including sub-slicing
fig1 = plt.figure(1, figsize=(8, 6*1.3))
fig1.set_facecolor('w')