Example no. 1
def setUp(self):
    self.n_turns = 10
    self.bunch_fn = 'bunchm'
    self.s_fn = 'sm'
    self.nslices = 5
    self.bunch_monitor = BunchMonitor(filename=self.bunch_fn,
                                      n_steps=self.n_turns,
                                      write_buffer_every=2,
                                      buffer_size=7,
                                      stats_to_store=['mean_x', 'macrop'])
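For context, a minimal sketch of how such a buffered monitor is driven: one dump() per turn, with the buffer flushed to the HDF5 file every write_buffer_every dumps. The MockBunch class and the file name here are invented for illustration; the monitor resolves 'mean_x' to a callable and 'macrop' to a plain attribute, as exercised by the test in Example no. 10 below.

import h5py
from PyHEADTAIL.monitors.monitors import BunchMonitor

class MockBunch(object):
    macrop = 99          # read as a plain attribute
    def __init__(self):
        self._i = 0
    def mean_x(self):    # called once per dump
        self._i += 1
        return self._i

monitor = BunchMonitor(filename='bunchm_demo', n_steps=10,
                       write_buffer_every=2, buffer_size=7,
                       stats_to_store=['mean_x', 'macrop'])
bunch = MockBunch()
for _ in range(10):
    monitor.dump(bunch)
with h5py.File('bunchm_demo.h5', 'r') as fh:
    print(fh['Bunch']['mean_x'][:])  # -> [1. 2. ... 10.]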
Example no. 2
    def _prepare_monitors(self):

        pp = self.pp

        if hasattr(pp, 'write_buffer_every'):
            write_buffer_every = pp.write_buffer_every
        else:
            write_buffer_every = 3

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor

        self.bunch_monitor = BunchMonitor(
            "bunch_evolution_%02d" % self.SimSt.present_simulation_part,
            pp.N_turns,
            {"Comment": "PyHDTL simulation"},
            write_buffer_every=write_buffer_every,
        )

        # define a slice monitor
        from PyHEADTAIL.monitors.monitors import SliceMonitor

        self.slice_monitor = SliceMonitor(
            "slice_evolution_%02d" % self.SimSt.present_simulation_part,
            pp.N_turns,
            self.slicer,
            {"Comment": "PyHDTL simulation"},
            write_buffer_every=write_buffer_every,
        )
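The hasattr/else fallback in _prepare_monitors can be collapsed into getattr's default argument; an equivalent one-liner:

write_buffer_every = getattr(pp, 'write_buffer_every', 3)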
Example no. 3
    def init_start_ring(self):
        stats_to_store = [
            'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
            'sigma_x', 'sigma_y', 'sigma_z', 'sigma_dp', 'epsn_x', 'epsn_y',
            'epsn_z', 'macroparticlenumber', 'i_bunch', 'i_turn'
        ]

        # integer division: the monitor expects an integer number of steps
        n_stored_turns = len(filling_pattern) * (
            self.ring_of_CPUs.N_turns // self.ring_of_CPUs.N_parellel_rings +
            self.ring_of_CPUs.N_parellel_rings)

        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_monitor_ring%03d' % self.ring_of_CPUs.myring,
            n_stored_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=1,
            stats_to_store=stats_to_store)
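To make the sizing concrete with illustrative numbers (not taken from the source): len(filling_pattern) = 4, N_turns = 100 and N_parellel_rings = 2 give n_stored_turns = 4 * (100 // 2 + 2) = 208 monitor steps.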
Example no. 4
def setUp(self):
    self.n_turns = 20
    self.bunch_fn = 'bunchm'
    self.s_fn = 'sm'
    self.nslices = 5
    self.bunch_monitor = BunchMonitor(filename=self.bunch_fn,
                                      n_steps=self.n_turns,
                                      write_buffer_every=20,
                                      buffer_size=39,
                                      stats_to_store=['mean_x', 'macrop'])
Example no. 5
def test_bunchmonitor(self):
    '''Test the bunchmonitor and all statistics functions'''
    self.n_macroparticles = 100  # use a high number to get accurate statistics
    n_steps = 5
    self.monitor_fn = 'monitor'
    bunchmonitor1 = BunchMonitor(self.monitor_fn + '1',
                                 n_steps=n_steps,
                                 write_buffer_every=2,
                                 buffer_size=3)
    bunchmonitor2 = BunchMonitor(self.monitor_fn + '2',
                                 n_steps=n_steps,
                                 write_buffer_every=2,
                                 buffer_size=3)
    bunch_cpu = self.create_gaussian_bunch()
    bunch_gpu = self.create_gaussian_bunch()
    self._monitor_cpu_gpu(bunchmonitor1, bunchmonitor2, bunch_cpu,
                          bunch_gpu)
    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z)
        print('Bunch initialized.')

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution',
                                          N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        return pieces_to_be_treated
Example no. 7
    def init_start_ring(self):
        stats_to_store = [
         'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
         'sigma_x', 'sigma_y', 'sigma_z','sigma_dp', 'epsn_x', 'epsn_y',
         'epsn_z', 'macroparticlenumber',
         'i_bunch', 'i_turn']

        # integer division: the monitor expects an integer number of steps
        n_stored_turns = len(filling_pattern) * (
            self.ring_of_CPUs.N_turns // self.ring_of_CPUs.N_parellel_rings +
            self.ring_of_CPUs.N_parellel_rings)

        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_monitor_ring%03d' % self.ring_of_CPUs.myring,
            n_stored_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=1,
            stats_to_store=stats_to_store)
    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles, intensity=intensity,
            epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=sigma_z)
        print('Bunch initialized.')

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution', N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        return pieces_to_be_treated
def run(intensity, chroma=0, i_oct=0):
    '''Arguments:
        - intensity: integer number of charges in beam
        - chroma: first-order chromaticity Q'_{x,y}, identical
          for both transverse planes
        - i_oct: octupole current in A (positive i_oct means
          LOF = i_oct > 0 and LOD = -i_oct < 0)
    '''

    # BEAM AND MACHINE PARAMETERS
    # ============================
    from LHC import LHC
    # energy set above will enter get_nonlinear_params p0
    assert machine_configuration == 'LHC-injection'
    machine = LHC(n_segments=1,
                  machine_configuration=machine_configuration,
                  **get_nonlinear_params(chroma=chroma,
                                         i_oct=i_oct,
                                         p0=0.45e12 * e / c))

    # BEAM
    # ====
    epsn_x = 3.e-6  # normalised horizontal emittance
    epsn_y = 3.e-6  # normalised vertical emittance
    sigma_z = 1.2e-9 * machine.beta * c / 4.  # RMS bunch length in meters

    bunch = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                               intensity,
                                               epsn_x,
                                               epsn_y,
                                               sigma_z=sigma_z,
                                               matched=True)

    print("\n--> Bunch length and emittance: {:g} m, {:g} eVs.".format(
        bunch.sigma_z(), bunch.epsn_z()))

    # CREATE BEAM SLICERS
    # ===================
    slicer_for_slicemonitor = UniformBinSlicer(50,
                                               z_cuts=(-3 * sigma_z,
                                                       3 * sigma_z))
    slicer_for_wakefields = UniformBinSlicer(
        50,
        z_cuts=(-3 * sigma_z, 3 * sigma_z),
        circumference=machine.circumference,
        h_bunch=machine.h_bunch)
    print("Slice")

    # CREATE WAKES
    # ============
    wake_table1 = WakeTable(
        wakefile,
        [
            'time',
            'dipole_x',
            'dipole_y',
            # 'quadrupole_x', 'quadrupole_y',
            'noquadrupole_x',
            'noquadrupole_y',
            # 'dipole_xy', 'dipole_yx',
            'nodipole_xy',
            'nodipole_yx',
        ])
    wake_field = WakeField(slicer_for_wakefields, wake_table1, mpi=True)

    # CREATE DAMPER
    # =============
    dampingtime = 50.
    gain = 2. / dampingtime
    damper = IdealBunchFeedback(gain)

    # CREATE MONITORS
    # ===============
    try:
        bucket = machine.longitudinal_map.get_bucket(bunch)
    except AttributeError:
        bucket = machine.rfbucket

    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': machine.Q_x,
        'Qy': machine.Q_y,
        'Qs': bucket.Q_s,
        'beta_x': bunch.beta_Twiss_x(),
        'beta_y': bunch.beta_Twiss_y(),
        'beta_z': bucket.beta_z,
        'epsn_x': bunch.epsn_x(),
        'epsn_y': bunch.epsn_y(),
        'sigma_z': bunch.sigma_z(),
    }
    bunchmonitor = BunchMonitor(
        outputpath + '/bunchmonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns,
        simulation_parameters_dict,
        write_buffer_to_file_every=512,
        buffer_size=4096)
    slicemonitor = SliceMonitor(
        outputpath + '/slicemonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns_slicemon,
        slicer_for_slicemonitor,
        simulation_parameters_dict,
        write_buffer_to_file_every=1,
        buffer_size=n_turns_slicemon)

    # TRACKING LOOP
    # =============
    # machine.one_turn_map.append(damper)
    machine.one_turn_map.append(wake_field)

    # for slice statistics monitoring:
    s_cnt = 0
    monitorswitch = False

    print('\n--> Begin tracking...\n')

    # GO!!!
    for i in range(n_turns):

        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8

        # track the beam around the machine for one turn:
        machine.track(bunch)

        ex, ey, ez = bunch.epsn_x(), bunch.epsn_y(), bunch.epsn_z()
        mx, my, mz = bunch.mean_x(), bunch.mean_y(), bunch.mean_z()

        # monitor the bunch statistics (once per turn):
        bunchmonitor.dump(bunch)

        # if the centroid becomes unstable (>1cm motion)
        # then monitor the slice statistics:
        if not monitorswitch:
            if (abs(mx) > 1e-2 or abs(my) > 1e-2  # the centroid oscillates, so compare magnitudes
                    or i > n_turns - n_turns_slicemon):
                print("--> Activate slice monitor")
                monitorswitch = True
        else:
            if s_cnt < n_turns_slicemon:
                slicemonitor.dump(bunch)
                s_cnt += 1

        # stop the tracking as soon as we have not-a-number values:
        if not all(np.isfinite(c) for c in [ex, ey, ez, mx, my, mz]):
            print('*** STOPPING SIMULATION: non-finite bunch stats!')
            break

        # print status every 100 turns:
        if i % 100 == 0:
            t1 = time.perf_counter()
            print('Emittances: ({:.3g}, {:.3g}, {:.3g}) '
                  '& Centroids: ({:.3g}, {:.3g}, {:.3g})'
                  '@ turn {:d}, {:g} ms, {:s}'.format(
                      ex, ey, ez, mx, my, mz, i, (t1 - t0) * 1e3,
                      time.strftime("%d/%m/%Y %H:%M:%S", time.localtime())))

    print('\n*** Successfully completed!')
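A hypothetical invocation of run(); the enclosing script is expected to provide module-level globals such as machine_configuration, n_macroparticles, wakefile, outputpath, it, n_turns and n_turns_slicemon, and the values below are purely illustrative:

run(intensity=1e11, chroma=10, i_oct=0)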
Example no. 10
import os
import unittest

import numpy as np
import h5py as hp

from PyHEADTAIL.monitors.monitors import BunchMonitor, SliceMonitor


class TestMonitor(unittest.TestCase):
    ''' Test the BunchMonitor/SliceMonitor'''
    def setUp(self):
        self.n_turns = 20
        self.bunch_fn = 'bunchm'
        self.s_fn = 'sm'
        self.nslices = 5
        self.bunch_monitor = BunchMonitor(filename=self.bunch_fn,
                                          n_steps=self.n_turns,
                                          write_buffer_every=20,
                                          buffer_size=39,
                                          stats_to_store=['mean_x', 'macrop'])

    def tearDown(self):
        # remove whichever output files exist; a missing file is not an error
        for fn in (self.bunch_fn + '.h5', self.s_fn + '.h5'):
            try:
                os.remove(fn)
            except OSError:
                pass

    def test_bunchmonitor(self):
        '''
        Test whether the data stored in the h5 file correspond to the
        correct values. Use a mock bunch class which creates an easy
        to check pattern when accessing 'mean_x', 'macrop'
        '''
        mock = self.generate_mock_bunch()
        for i in range(self.n_turns):
            self.bunch_monitor.dump(mock)
        bunchdata = hp.File(self.bunch_fn + '.h5', 'r')
        b = bunchdata['Bunch']
        self.assertTrue(np.allclose(b['mean_x'],
            np.arange(start=1, stop=self.n_turns+0.5)))
        self.assertTrue(np.allclose(b['macrop'], 99*np.ones(self.n_turns)))

    def test_slicemonitor(self):
        '''
        Test whether the slicemonitor works as expected, use the mock slicer
        '''
        nslices = 3
        mock_slicer = self.generate_mock_slicer(nslices)
        mock_bunch = self.generate_mock_bunch()
        slice_monitor = SliceMonitor(filename=self.s_fn, n_steps=self.n_turns,
                slicer=mock_slicer, buffer_size=11, write_buffer_every=9,
                slice_stats_to_store=['propertyA'],
                bunch_stats_to_store=['mean_x', 'macrop'])
        for i in range(self.n_turns):
            slice_monitor.dump(mock_bunch)
        s = hp.File(self.s_fn + '.h5', 'r')
        sd = s['Slices']
        sb = s['Bunch']
        self.assertTrue(np.allclose(sb['mean_x'],
            np.arange(start=1, stop=self.n_turns+0.5)))
        self.assertTrue(np.allclose(sb['macrop'], 99*np.ones(self.n_turns)))
        for k in range(nslices):
            for j in range(self.n_turns):
                self.assertTrue(np.allclose(sd['propertyA'][k,j],
                    k + (j+1)*1000), 'Slices part of SliceMonitor wrong')


    def generate_mock_bunch(self):
        '''
        Create a mock class which defines certain attributes which can be
        stored via the BunchMonitor
        '''
        class Mock:
            def __init__(self):
                self.counter = np.zeros(3, dtype=np.int32)  # one for each of mean/std/...
                self.macrop = 99

            def mean_x(self):
                self.counter[0] += 1
                return self.counter[0]

            def mean_y(self):
                self.counter[1] += 1
                return self.counter[1]

            def get_slices(self, slicer, **kwargs):
                return slicer

        return Mock()

    def generate_mock_slicer(self, nslices):
        ''' Create a mock slicer to test behaviour'''
        class Mock:
            def __init__(self, nslices):
                self.n_slices = nslices
                self.counter = 0

            @property
            def propertyA(self):
                ''' Return an array of length nslices, np.arange(nslices)
                Add the number of calls * 1000 to the array
                This makes it easy to compare the results
                '''
                self.counter += 1
                prop = np.arange(0, self.n_slices, 1, dtype=np.float64)
                prop += self.counter*1000
                return prop

        return Mock(nslices)
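The mock's counter pattern makes the stored slice data fully predictable: entry [k, j] equals k + (j + 1) * 1000 for slice k at dump j. A standalone sketch building the full expected array at once (using the test's nslices = 3 and n_turns = 20):

import numpy as np

nslices, n_turns = 3, 20
# expected[k, j] == k + (j + 1) * 1000, matching the per-element assertions above
expected = np.arange(nslices)[:, None] + 1000 * np.arange(1, n_turns + 1)[None, :]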
Example no. 11
def run():
    # HELPERS
    def read_all_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5')
        slicedata = hp.File(sfile + '.h5')
        particledata = hp.File(pfile + '.h5part')

        # Bunchdata
        bdata = bunchdata['Bunch']

        n_turns = len(bdata['mean_x'])
        _ = np.empty(n_turns)
        for key in list(bdata.keys()):
            _[:] = bdata[key][:]

        # Slicedata
        sdata = slicedata['Slices']
        sbdata = slicedata['Bunch']

        n_turns = len(sbdata['mean_x'])
        _ = np.empty(n_turns)
        for key in list(sbdata.keys()):
            _[:] = sbdata[key][:]

        n_slices, n_turns = sdata['mean_x'].shape
        _ = np.empty((n_slices, n_turns))
        for key in list(sdata.keys()):
            _[:, :] = sdata[key][:, :]

        # Particledata
        pdata = particledata['Step#0']
        n_particles = len(pdata['x'])
        n_steps = len(list(particledata.keys()))
        _ = np.empty(n_particles)

        for i in range(n_steps):
            step = 'Step#%d' % i
            for key in list(particledata[step].keys()):
                _[:] = particledata[step][key][:]

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def read_n_plot_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5')
        slicedata = hp.File(sfile + '.h5')
        particledata = hp.File(pfile + '.h5part')

        fig = plt.figure(figsize=(16, 16))
        ax1 = fig.add_subplot(311)
        ax2 = fig.add_subplot(312)
        ax3 = fig.add_subplot(313)

        ax1.plot(bunchdata['Bunch']['mean_x'][:])
        ax2.plot(slicedata['Slices']['mean_x'][:, :])
        ax3.plot(particledata['Step#0']['x'][:])
        #ax2.plot(slicedata[])

        plt.show()

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def generate_bunch(n_macroparticles, alpha_x, alpha_y, beta_x, beta_y,
                       alpha_0, Q_s, R):

        intensity = 1.05e11
        sigma_z = 0.059958
        gamma = 3730.26
        eta = alpha_0 - 1. / gamma**2
        gamma_t = 1. / np.sqrt(alpha_0)
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = eta * R / Q_s

        epsn_x = 3.75e-6  # [m rad]
        epsn_y = 3.75e-6  # [m rad]
        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (
            beta_z * e)  # with or without the factor of 4 pi?

        bunch = generators.generate_Gaussian6DTwiss(
            macroparticlenumber=n_macroparticles,
            intensity=intensity,
            charge=e,
            gamma=gamma,
            mass=m_p,
            circumference=C,
            alpha_x=alpha_x,
            beta_x=beta_x,
            epsn_x=epsn_x,
            alpha_y=alpha_y,
            beta_y=beta_y,
            epsn_y=epsn_y,
            beta_z=beta_z,
            epsn_z=epsn_z)
        return bunch

    # In[4]:
    # Basic parameters.
    n_turns = 2
    n_segments = 5
    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2. * np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = 0.0003225

    # ##### Things tested:
    # - Instantiation of the three monitors BunchMonitor, SliceMonitor,
    #   ParticleMonitor.
    # - dump(beam) method for all three.
    # - Read data from file. Plot example data from Bunch-, Slice- and
    #   Particle-Monitors.
    # - SliceMonitor: does it handle/request slice_sets correctly?
    # - Buffers are on for Bunch- and SliceMonitors. Look at one of the
    #   files in hdfview to check the units, attributes, ...

    # In[5]:

    # Parameters for transverse map.
    s = np.arange(0, n_segments + 1) * C / n_segments

    alpha_x = alpha_x_inj * np.ones(n_segments)
    beta_x = beta_x_inj * np.ones(n_segments)
    D_x = np.zeros(n_segments)

    alpha_y = alpha_y_inj * np.ones(n_segments)
    beta_y = beta_y_inj * np.ones(n_segments)
    D_y = np.zeros(n_segments)

    # In[6]:

    # Instantiate BunchMonitor, SliceMonitor and ParticleMonitor and dump data to file.
    bunch = generate_bunch(n_macroparticles, alpha_x_inj, alpha_y_inj,
                           beta_x_inj, beta_y_inj, alpha_0, Q_s, R)

    trans_map = TransverseMap(s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y,
                              Q_x, Q_y)

    # Slicer config for SliceMonitor.
    unibin_slicer = UniformBinSlicer(n_slices=10, n_sigma_z=None, z_cuts=None)

    # Monitors
    bunch_filename = 'bunch_mon'
    slice_filename = 'slice_mon'
    particle_filename = 'particle_mon'

    bunch_monitor = BunchMonitor(filename=bunch_filename,
                                 n_steps=n_turns,
                                 parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    slice_monitor = SliceMonitor(filename=slice_filename,
                                 n_steps=n_turns,
                                 slicer=unibin_slicer,
                                 parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    particle_monitor = ParticleMonitor(filename=particle_filename,
                                       stride=10,
                                       parameters_dict={'Q_x': Q_x})

    arrays_dict = {}
    map_ = trans_map
    for i in range(n_turns):
        for m_ in map_:
            m_.track(bunch)
        bunch_monitor.dump(bunch)
        slice_monitor.dump(bunch)

        slice_set_pmon = bunch.get_slices(unibin_slicer)
        arrays_dict.update({
            'slidx': slice_set_pmon.slice_index_of_particle,
            'zz': bunch.z
        })
        particle_monitor.dump(bunch, arrays_dict)

    read_all_data(bunch_filename, slice_filename, particle_filename)

    os.remove(bunch_filename + '.h5')
    os.remove(slice_filename + '.h5')
    os.remove(particle_filename + '.h5part')
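As a sanity check on generate_bunch's longitudinal parameters, a quick worked evaluation with the constants used above (rounded, for illustration only):

import numpy as np

C = 26658.883                   # circumference [m]
R = C / (2. * np.pi)            # ~4242.9 m
gamma, alpha_0, Q_s = 3730.26, 0.0003225, 0.0020443
eta = alpha_0 - 1. / gamma**2   # ~3.224e-4 (slippage factor)
beta_z = eta * R / Q_s          # ~669 m
print(beta_z)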

# apply initial displacement
bunch.x += x_kick
bunch.y += y_kick


# Manage multi-job operation
import Save_Load_Status as SLS
SimSt = SLS.SimulationStatus(N_turns_per_run=N_turns_per_run,
                             N_turns_target=N_turns_target, jobid=jobid,
                             check_for_resubmit=True, queue=queue)
SimSt.before_simulation()


# define a bunch monitor
from PyHEADTAIL.monitors.monitors import BunchMonitor
bunch_monitor = BunchMonitor('bunch_evolution_%02d' % SimSt.present_simulation_part,
                             N_turns_per_run, {'Comment': 'PyHDTL simulation'},
                             write_buffer_every=8)


if not SimSt.first_run:
    # If not the first part, load saved bunch and ecloud
    bunch = SLS.beam_from_h5status('bunch_status_part%02d.h5'
                                   % (SimSt.present_simulation_part - 1))
    SLS.reinit_ecloud_from_h5status('ecloud_init_status.h5', ecloud)
else:
    # save ecloud particles status
    SLS.dump_ecloud_status(ecloud, 'ecloud_init_status.h5')

# simulate
import time
for i_turn in range(N_turns_per_run):
    print('%s Turn %d' % (time.strftime("%d/%m/%Y %H:%M:%S",
                                        time.localtime()), i_turn))
    machine.track(bunch, verbose=False)
class Simulation(object):
    def __init__(self):
        self.N_turns = N_turns

    def init_all(self):

        self.n_slices = n_slices
        self.n_segments = n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=n_segments,
                           machine_configuration=machine_configuration)

        # define MP size
        nel_mp_ref_0 = init_unif_edens * 4 * x_aper * y_aper / N_MP_ele_init

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT
        ecloud = PyEC4PyHT.Ecloud(
            slice_by_slice_mode=True,
            L_ecloud=self.machine.circumference / n_segments,
            slicer=None,
            Dt_ref=Dt_ref,
            pyecl_input_folder=pyecl_input_folder,
            chamb_type=chamb_type,
            x_aper=x_aper,
            y_aper=y_aper,
            filename_chm=filename_chm,
            Dh_sc=Dh_sc,
            init_unif_edens_flag=init_unif_edens_flag,
            init_unif_edens=init_unif_edens,
            N_mp_max=N_mp_max,
            nel_mp_ref_0=nel_mp_ref_0,
            B_multip=B_multip_per_eV * self.machine.p0 / e * c)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=ecloud.impact_man.chamb.x_aper,
            y_aper=ecloud.impact_man.chamb.y_aper)
        self.machine.one_turn_map.append(apt_xy)

        n_non_parallelizable = 2  # RF and aperture

        # We assume that all the objects that cannot be parallelized
        # slice-by-slice are at the end of the ring
        i_end_parallel = len(self.machine.one_turn_map) - n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print('I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print('I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

        # install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge(
                )
                my_new_part.append(ecloud_new)
                self.my_list_eclouds.append(ecloud_new)
        self.mypart = my_new_part

    def init_master(self):

        # generate a bunch
        bunch = self.machine.generate_6D_Gaussian_bunch_matched(
            n_macroparticles=n_macroparticles,
            intensity=intensity,
            epsn_x=epsn_x,
            epsn_y=epsn_y,
            sigma_z=sigma_z)
        print('Bunch initialized.')

        # initial slicing
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        self.slicer = UniformBinSlicer(n_slices=n_slices, n_sigma_z=n_sigma_z)

        # compute initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y

        # apply initial displacement
        bunch.x += x_kick
        bunch.y += y_kick

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_evolution',
                                          N_turns,
                                          {'Comment': 'PyHDTL simulation'},
                                          write_buffer_every=8)

        # slice for the first turn
        slice_obj_list = bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        bunch = sum(pieces_treated)

        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(bunch)

        # save results
        #print '%s Turn %d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = bunch.extract_slices(self.slicer)
        orders_to_pass = ['reset_clouds']

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        pass

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
Example no. 14
def run(intensity, chroma=0, i_oct=0):
    '''Arguments:
        - intensity: integer number of charges in beam
        - chroma: first-order chromaticity Q'_{x,y}, identical
          for both transverse planes
        - i_oct: octupole current in A (positive i_oct means
          LOF = i_oct > 0 and LOD = -i_oct < 0)
    '''

    # BEAM AND MACHINE PARAMETERS
    # ============================
    from LHC import LHC
    # energy set above will enter get_nonlinear_params p0
    assert machine_configuration == 'LHC-injection'
    machine = LHC(n_segments=1,
                  machine_configuration=machine_configuration,
                  **get_nonlinear_params(chroma=chroma,
                                         i_oct=i_oct,
                                         p0=0.45e12 * e / c))

    # BEAM
    # ====

    #print(filling_scheme)

    epsn_x = 3.e-6  # normalised horizontal emittance
    epsn_y = 3.e-6  # normalised vertical emittance
    sigma_z = 1.2e-9 * machine.beta * c / 4.  # RMS bunch length in meters

    beam = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                              intensity,
                                              epsn_x,
                                              epsn_y,
                                              sigma_z=sigma_z,
                                              matched=True,
                                              filling_scheme=filling_scheme)

    bunch_list = beam.split_to_views()

    for b in bunch_list:
        if b.bucket_id[0] < batch_length:
            b.x += 1e-3
            b.y += 1e-3

    bunch = bunch_list[0]

    print("\n--> Bunch length and emittance: {:g} m, {:g} eVs.".format(
        bunch.sigma_z(), bunch.epsn_z()))

    # CREATE BEAM SLICERS
    # ===================
    slicer_for_slicemonitor = UniformBinSlicer(50,
                                               z_cuts=(-3 * sigma_z,
                                                       3 * sigma_z))
    slicer_for_wakefields = UniformBinSlicer(
        50,
        z_cuts=(-3 * sigma_z, 3 * sigma_z),
        circumference=machine.circumference,
        h_bunch=machine.h_bunch)

    # CREATE WAKES
    # ============
    wake_table1 = WakeTable(
        wakefile,
        [
            'time',
            'dipole_x',
            'dipole_y',
            # 'quadrupole_x', 'quadrupole_y',
            'noquadrupole_x',
            'noquadrupole_y',
            # 'dipole_xy', 'dipole_yx',
            'nodipole_xy',
            'nodipole_yx',
        ])
    wake_field = WakeField(slicer_for_wakefields,
                           wake_table1,
                           mpi='linear_mpi_full_ring_fft')

    # CREATE DAMPER
    # =============
    from PyHEADTAIL_feedback.feedback import OneboxFeedback
    from PyHEADTAIL_feedback.processors.multiplication import ChargeWeighter
    from PyHEADTAIL_feedback.processors.register import TurnFIRFilter
    from PyHEADTAIL_feedback.processors.convolution import Lowpass, FIRFilter
    from PyHEADTAIL_feedback.processors.resampling import (
        DAC, HarmonicADC, BackToOriginalBins, Upsampler)
    from MD4063_filter_functions import calculate_coefficients_3_tap, calculate_hilbert_notch_coefficients
    #    from PyHEADTAIL_feedback.processors.addition import NoiseGenerator

    dampingtime = 20.
    gain = 2. / dampingtime

    lowpass100kHz = [
        1703, 1169, 1550, 1998, 2517, 3108, 3773, 4513, 5328, 6217, 7174, 8198,
        9282, 10417, 11598, 12813, 14052, 15304, 16555, 17793, 19005, 20176,
        21294, 22345, 23315, 24193, 24969, 25631, 26171, 26583, 26860, 27000,
        27000, 26860, 26583, 26171, 25631, 24969, 24193, 23315, 22345, 21294,
        20176, 19005, 17793, 16555, 15304, 14052, 12813, 11598, 10417, 9282,
        8198, 7174, 6217, 5328, 4513, 3773, 3108, 2517, 1998, 1550, 1169, 1703
    ]

    lowpassEnhanced = [
        490, 177, -478, -820, -370, 573, 1065, 428, -909, -1632, -799, 1015,
        2015, 901, -1592, -3053, -1675, 1642, 3670, 1841, -2828, -6010, -3929,
        2459, 7233, 4322, -6384, -17305, -18296, -5077, 16097, 32000, 32000,
        16097, -5077, -18296, -17305, -6384, 4322, 7233, 2459, -3929, -6010,
        -2828, 1841, 3670, 1642, -1675, -3053, -1592, 901, 2015, 1015, -799,
        -1632, -909, 428, 1065, 573, -370, -820, -478, 177, 490
    ]

    lowpass20MHz = [
        38, 118, 182, 112, -133, -389, -385, -45, 318, 257, -259, -665, -361,
        473, 877, 180, -996, -1187, 162, 1670, 1329, -954, -2648, -1219, 2427,
        4007, 419, -5623, -6590, 2893, 19575, 32700, 32700, 19575, 2893, -6590,
        -5623, 419, 4007, 2427, -1219, -2648, -954, 1329, 1670, 162, -1187,
        -996, 180, 877, 473, -361, -665, -259, 257, 318, -45, -385, -389, -133,
        112, 182, 118, 38
    ]

    phaseEqualizer = [
        2, 4, 7, 10, 12, 16, 19, 22, 27, 31, 36, 42, 49, 57, 67, 77, 90, 104,
        121, 141, 164, 191, 223, 261, 305, 358, 422, 498, 589, 700, 836, 1004,
        1215, 1483, 1832, 2301, 2956, 3944, 5600, 9184, 25000, -16746, -4256,
        -2056, -1195, -769, -523, -372, -271, -202, -153, -118, -91, -71, -56,
        -44, -34, -27, -20, -15, -11, -7, -4, -1
    ]

    FIR_phase_filter = np.loadtxt(
        './injection_error_input_data/FIR_Phase_40MSPS.csv')
    FIR_phase_filter = np.array(phaseEqualizer)
    FIR_phase_filter = FIR_phase_filter / float(np.sum(FIR_phase_filter))

    FIR_gain_filter = np.array(lowpass20MHz)
    FIR_gain_filter = FIR_gain_filter / float(np.sum(lowpass20MHz))

    # Cut-off frequency of the kicker system
    fc = 1.0e6
    ADC_bits = 16
    ADC_range = (-1e-3, 1e-3)

    # signal processing delay in turns before the first measurement is applied
    delay = 1
    extra_adc_bins = 10
    # betatron phase advance between the pickup and the kicker. The value 0.25
    # corresponds to the 90 deg phase change from the pickup measurements
    # in the x-plane to the correction kicks in the xp-plane.

    additional_phase = 0.25  # Kicker-to-pickup phase advance 0 deg
    #    additional_phase = 0. # Kicker-to-pickup phase advance 90 deg

    f_RF = 1. / (machine.circumference / c / (float(machine.h_RF)))
    #    turn_phase_filter_x = calculate_hilbert_notch_coefficients(machine.Q_x, delay, additional_phase)
    #    turn_phase_filter_y = calculate_hilbert_notch_coefficients(machine.Q_y, delay, additional_phase)

    turn_phase_filter_x = calculate_coefficients_3_tap(machine.Q_x, delay,
                                                       additional_phase)
    turn_phase_filter_y = calculate_coefficients_3_tap(machine.Q_y, delay,
                                                       additional_phase)

    print('f_RF: ' + str(f_RF))

    processors_detailed_x = [
        Bypass(),
        ChargeWeighter(normalization='segment_average'),
        #         NoiseGenerator(RMS_noise_level, debug=False),
        HarmonicADC(1 * f_RF / 10.,
                    ADC_bits,
                    ADC_range,
                    n_extras=extra_adc_bins),
        TurnFIRFilter(turn_phase_filter_x, machine.Q_x, delay=delay),
        FIRFilter(FIR_phase_filter, zero_tap=40),
        Upsampler(3, [1.5, 1.5, 0]),
        FIRFilter(FIR_gain_filter, zero_tap=34),
        DAC(ADC_bits, ADC_range),
        Lowpass(fc, f_cutoff_2nd=10 * fc),
        BackToOriginalBins(),
    ]

    processors_detailed_y = [
        Bypass(),
        ChargeWeighter(normalization='segment_average'),
        #         NoiseGenerator(RMS_noise_level, debug=False),
        HarmonicADC(1 * f_RF / 10.,
                    ADC_bits,
                    ADC_range,
                    n_extras=extra_adc_bins),
        TurnFIRFilter(turn_phase_filter_y, machine.Q_y, delay=delay),
        FIRFilter(FIR_phase_filter, zero_tap=40),
        Upsampler(3, [1.5, 1.5, 0]),
        FIRFilter(FIR_gain_filter, zero_tap=34),
        DAC(ADC_bits, ADC_range),
        Lowpass(fc, f_cutoff_2nd=10 * fc),
        BackToOriginalBins(),
    ]

    # Kicker-to-pickup phase advance 0 deg
    damper = OneboxFeedback(gain,
                            slicer_for_wakefields,
                            processors_detailed_x,
                            processors_detailed_y,
                            pickup_axis='displacement',
                            kicker_axis='divergence',
                            mpi=True,
                            beta_x=machine.beta_x,
                            beta_y=machine.beta_y)

    #    # Kicker-to-pickup phase advance 90 deg
    #    damper = OneboxFeedback(gain,slicer_for_wakefields,
    #                                  processors_detailed_x,processors_detailed_y, mpi=True,
    #                            pickup_axis='displacement', kicker_axis='displacement')

    # CREATE MONITORS
    # ===============

    try:
        bucket = machine.longitudinal_map.get_bucket(bunch)
    except AttributeError:
        bucket = machine.rfbucket

    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': machine.Q_x,
        'Qy': machine.Q_y,
        'Qs': bucket.Q_s,
        'beta_x': bunch.beta_Twiss_x(),
        'beta_y': bunch.beta_Twiss_y(),
        'beta_z': bucket.beta_z,
        'epsn_x': bunch.epsn_x(),
        'epsn_y': bunch.epsn_y(),
        'sigma_z': bunch.sigma_z(),
    }
    bunchmonitor = BunchMonitor(
        outputpath + '/bunchmonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns,
        simulation_parameters_dict,
        write_buffer_to_file_every=512,
        buffer_size=4096,
        mpi=True,
        filling_scheme=filling_scheme)
    #    slicemonitor = SliceMonitor(
    #        outputpath+'/slicemonitor_{:04d}_chroma={:g}_bunch_{:04d}'.format(it, chroma, bunch.bucket_id[0]),
    #        n_turns_slicemon,
    #        slicer_for_slicemonitor, simulation_parameters_dict,
    #        write_buffer_to_file_every=1, buffer_size=n_turns_slicemon)

    # TRACKING LOOP
    # =============
    machine.one_turn_map.append(damper)
    machine.one_turn_map.append(wake_field)

    # for slice statistics monitoring:
    s_cnt = 0
    monitorswitch = False

    print('\n--> Begin tracking...\n')

    # GO!!!
    for i in range(n_turns):

        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8

        # track the beam around the machine for one turn:
        machine.track(beam)

        bunch_list = beam.split_to_views()
        bunch = bunch_list[0]

        ex, ey, ez = bunch.epsn_x(), bunch.epsn_y(), bunch.epsn_z()
        mx, my, mz = bunch.mean_x(), bunch.mean_y(), bunch.mean_z()

        # monitor the bunch statistics (once per turn):
        bunchmonitor.dump(beam)

        # if the centroid becomes unstable (>1cm motion)
        # then monitor the slice statistics:
        if not monitorswitch:
            if (abs(mx) > 1e-2 or abs(my) > 1e-2  # the centroid oscillates, so compare magnitudes
                    or i > n_turns - n_turns_slicemon):
                print("--> Activate slice monitor")
                monitorswitch = True
        else:
            if s_cnt < n_turns_slicemon:
                #                slicemonitor.dump(bunch)
                s_cnt += 1

        # stop the tracking as soon as we have not-a-number values:
        if not all(np.isfinite(c) for c in [ex, ey, ez, mx, my, mz]):
            print('*** STOPPING SIMULATION: non-finite bunch stats!')
            break

        # print status every 100 turns:
        if i % 100 == 0:
            t1 = time.perf_counter()
            print('Emittances: ({:.3g}, {:.3g}, {:.3g}) '
                  '& Centroids: ({:.3g}, {:.3g}, {:.3g})'
                  '@ turn {:d}, {:g} ms, {:s}'.format(
                      ex, ey, ez, mx, my, mz, i, (t1 - t0) * 1e3,
                      time.strftime("%d/%m/%Y %H:%M:%S", time.localtime())))

    print('\n*** Successfully completed!')
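A side note on the filter handling in the damper section above: dividing an FIR tap array by its tap sum sets the filter's DC gain to one, which is what FIR_gain_filter = FIR_gain_filter / float(np.sum(lowpass20MHz)) does. A distilled check, with three illustrative taps standing in for the full lowpass20MHz list:

import numpy as np

taps = np.array([38., 118., 182.])   # illustrative taps only
taps /= taps.sum()                   # normalize: the DC gain (sum of taps) becomes 1
assert abs(taps.sum() - 1.0) < 1e-12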
Example no. 15
def run():
    # HELPERS
    def read_all_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5')
        slicedata = hp.File(sfile + '.h5')
        particledata = hp.File(pfile + '.h5part')

        # Bunchdata
        bdata = bunchdata['Bunch']

        n_turns = len(bdata['mean_x'])
        _ = np.empty(n_turns)
        for key in bdata.keys():
            _[:] = bdata[key][:]

        # Slicedata
        sdata = slicedata['Slices']
        sbdata = slicedata['Bunch']

        n_turns = len(sbdata['mean_x'])
        _ = np.empty(n_turns)
        for key in sbdata.keys():
            _[:] = sbdata[key][:]

        n_slices, n_turns = sdata['mean_x'].shape
        _ = np.empty((n_slices, n_turns))
        for key in sdata.keys():
            _[:,:] = sdata[key][:,:]

        # Particledata
        pdata = particledata['Step#0']
        n_particles = len(pdata['x'])
        n_steps = len(particledata.keys())
        _ = np.empty(n_particles)

        for i in range(n_steps):
            step = 'Step#%d' % i
            for key in particledata[step].keys():
                _[:] = particledata[step][key][:]

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def read_n_plot_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5')
        slicedata = hp.File(sfile + '.h5')
        particledata = hp.File(pfile + '.h5part')

        fig = plt.figure(figsize=(16, 16))
        ax1 = fig.add_subplot(311)
        ax2 = fig.add_subplot(312)
        ax3 = fig.add_subplot(313)

        ax1.plot(bunchdata['Bunch']['mean_x'][:])
        ax2.plot(slicedata['Slices']['mean_x'][:,:])
        ax3.plot(particledata['Step#0']['x'][:])
        #ax2.plot(slicedata[])

        plt.show()

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def generate_bunch(n_macroparticles, alpha_x, alpha_y, beta_x, beta_y, alpha_0, Q_s, R):

        intensity = 1.05e11
        sigma_z = 0.059958
        gamma = 3730.26
        eta = alpha_0 - 1. / gamma**2
        gamma_t = 1. / np.sqrt(alpha_0)
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = eta * R / Q_s

        epsn_x = 3.75e-6 # [m rad]
        epsn_y = 3.75e-6 # [m rad]
        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (beta_z * e)  # with or without the factor of 4 pi?

        bunch = generators.generate_Gaussian6DTwiss(
            macroparticlenumber=n_macroparticles, intensity=intensity, charge=e,
            gamma=gamma, mass=m_p, circumference=C,
            alpha_x=alpha_x, beta_x=beta_x, epsn_x=epsn_x,
            alpha_y=alpha_y, beta_y=beta_y, epsn_y=epsn_y,
            beta_z=beta_z, epsn_z=epsn_z)
        return bunch


    # In[4]:
    # Basic parameters.
    n_turns = 2
    n_segments = 5
    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2.*np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = 0.0003225


    # ##### Things tested:
    # - Instantiation of the three monitors BunchMonitor, SliceMonitor,
    #   ParticleMonitor.
    # - dump(beam) method for all three.
    # - Read data from file. Plot example data from Bunch-, Slice- and
    #   Particle-Monitors.
    # - SliceMonitor: does it handle/request slice_sets correctly?
    # - Buffers are on for Bunch- and SliceMonitors. Look at one of the
    #   files in hdfview to check the units, attributes, ...

    # In[5]:

    # Parameters for transverse map.
    s = np.arange(0, n_segments + 1) * C / n_segments

    alpha_x = alpha_x_inj * np.ones(n_segments)
    beta_x = beta_x_inj * np.ones(n_segments)
    D_x = np.zeros(n_segments)

    alpha_y = alpha_y_inj * np.ones(n_segments)
    beta_y = beta_y_inj * np.ones(n_segments)
    D_y = np.zeros(n_segments)


    # In[6]:

    # Instantiate BunchMonitor, SliceMonitor and ParticleMonitor and dump data to file.
    bunch = generate_bunch(
        n_macroparticles, alpha_x_inj, alpha_y_inj, beta_x_inj, beta_y_inj,
        alpha_0, Q_s, R)

    trans_map = TransverseMap(
        s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y, Q_x, Q_y)

    # Slicer config for SliceMonitor.
    unibin_slicer = UniformBinSlicer(n_slices=10, n_sigma_z=None, z_cuts=None)

    # Monitors
    bunch_filename = 'bunch_mon'
    slice_filename = 'slice_mon'
    particle_filename = 'particle_mon'

    bunch_monitor = BunchMonitor(filename=bunch_filename, n_steps=n_turns, parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    slice_monitor = SliceMonitor(
        filename=slice_filename, n_steps=n_turns, slicer=unibin_slicer, parameters_dict={'Q_x': Q_x},
        write_buffer_every=20)
    particle_monitor = ParticleMonitor(filename=particle_filename, stride=10, parameters_dict={'Q_x': Q_x})

    arrays_dict = {}
    map_ = trans_map
    for i in range(n_turns):
        for m_ in map_:
            m_.track(bunch)
        bunch_monitor.dump(bunch)
        slice_monitor.dump(bunch)

        slice_set_pmon = bunch.get_slices(unibin_slicer)
        arrays_dict.update({'slidx': slice_set_pmon.slice_index_of_particle, 'zz': bunch.z})
        particle_monitor.dump(bunch, arrays_dict)

    read_all_data(bunch_filename, slice_filename, particle_filename)

    os.remove(bunch_filename + '.h5')
    os.remove(slice_filename + '.h5')
    os.remove(particle_filename + '.h5part')
Example no. 16
class Simulation(object):
    def __init__(self):
        self.N_turns = 10000
        self.N_buffer_float_size = 10000000
        self.N_buffer_int_size = 20
        self.N_parellel_rings = 2

        self.n_slices_per_bunch = 200
        self.z_cut_slicing = 3 * sigma_z_bunch
        self.N_pieces_per_transfer = 300

    def init_all(self):

        print('Exec init...')

        self.ring_of_CPUs.verbose = verbose

        from LHC_custom import LHC
        self.machine = LHC(n_segments=n_segments,
                           machine_configuration=machine_configuration,
                           Qp_x=Qp_x,
                           Qp_y=Qp_y)
        self.n_non_parallelizable = 1  # RF

        inj_optics = self.machine.transverse_map.get_injection_optics()
        sigma_x_smooth = np.sqrt(inj_optics['beta_x'] * epsn_x /
                                 self.machine.betagamma)
        sigma_y_smooth = np.sqrt(inj_optics['beta_y'] * epsn_y /
                                 self.machine.betagamma)

        if flag_aperture:
            # setup transverse losses (to "protect" the ecloud)
            import PyHEADTAIL.aperture.aperture as aperture
            apt_xy = aperture.EllipticalApertureXY(
                x_aper=target_size_internal_grid_sigma * sigma_x_smooth,
                y_aper=target_size_internal_grid_sigma * sigma_y_smooth)
            self.machine.one_turn_map.append(apt_xy)
            self.n_non_parallelizable += 1

        if enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=dampingrate_x,
                                      dampingrate_y=dampingrate_y)
            self.machine.one_turn_map.append(damper)
            self.n_non_parallelizable += 1

        if enable_ecloud:
            print('Build ecloud...')
            import PyECLOUD.PyEC4PyHT as PyEC4PyHT
            ecloud = PyEC4PyHT.Ecloud(
                L_ecloud=L_ecloud_tot / n_segments,
                slicer=None,
                slice_by_slice_mode=True,
                Dt_ref=5e-12,
                pyecl_input_folder='./pyecloud_config',
                chamb_type='polyg',
                filename_chm='LHC_chm_ver.mat',
                #init_unif_edens_flag=1,
                #init_unif_edens=1e7,
                #N_mp_max = 3000000,
                #nel_mp_ref_0 = 1e7/(0.7*3000000),
                #B_multip = [0.],
                #~ PyPICmode = 'ShortleyWeller_WithTelescopicGrids',
                #~ f_telescope = 0.3,
                target_grid={
                    'x_min_target':
                    -target_size_internal_grid_sigma * sigma_x_smooth,
                    'x_max_target':
                    target_size_internal_grid_sigma * sigma_x_smooth,
                    'y_min_target':
                    -target_size_internal_grid_sigma * sigma_y_smooth,
                    'y_max_target':
                    target_size_internal_grid_sigma * sigma_y_smooth,
                    'Dh_target': .2 * sigma_x_smooth
                },
                #~ N_nodes_discard = 10.,
                #~ N_min_Dh_main = 10,
                #x_beam_offset = x_beam_offset,
                #y_beam_offset = y_beam_offset,
                #probes_position = probes_position,
                save_pyecl_outp_as='cloud_evol_ring%d' %
                self.ring_of_CPUs.myring,
                sparse_solver='PyKLU',
                enable_kick_x=enable_kick_x,
                enable_kick_y=enable_kick_y)
            print('Done.')

        # split the machine
        i_end_parallel = len(
            self.machine.one_turn_map) - self.n_non_parallelizable
        sharing = shs.ShareSegments(i_end_parallel,
                                    self.ring_of_CPUs.N_nodes_per_ring)
        i_start_part, i_end_part = sharing.my_part(
            self.ring_of_CPUs.myid_in_ring)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

        if self.ring_of_CPUs.I_am_at_end_ring:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]

        # install eclouds in my part
        if enable_ecloud:
            my_new_part = []
            self.my_list_eclouds = []
            for ele in self.mypart:
                if ele in self.machine.transverse_map:
                    ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge(
                    )

                    # we save buildup info only for the first cloud in each ring
                    if self.ring_of_CPUs.myid_in_ring > 0 or len(
                            self.my_list_eclouds) > 0:
                        ecloud_new.remove_savers()

                    my_new_part.append(ecloud_new)
                    self.my_list_eclouds.append(ecloud_new)
                my_new_part.append(ele)

            self.mypart = my_new_part

            print('Hello, I am %d.%d, my part looks like: %s. Saver status: %s'
                  % (self.ring_of_CPUs.myring, self.ring_of_CPUs.myid_in_ring,
                     self.mypart,
                     [(ec.cloudsim.cloud_list[0].pyeclsaver is not None)
                      for ec in self.my_list_eclouds]))

    def init_master(self):

        print('Building the beam!')

        from scipy.constants import c as clight, e as qe
        from PyHEADTAIL.particles.slicing import UniformBinSlicer

        import PyPARIS.gen_multibunch_beam as gmb
        list_bunches = gmb.gen_matched_multibunch_beam(
            self.machine, macroparticlenumber, filling_pattern, b_spac_s,
            bunch_intensity, epsn_x, epsn_y, sigma_z, non_linear_long_matching,
            min_inten_slice4EC)

        if pickle_beam:
            import pickle
            with open('init_beam.pkl', 'wb') as fid:  # pickle files are binary
                pickle.dump({'list_bunches': list_bunches}, fid)

        # compute and apply initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x'] * epsn_x / self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y'] * epsn_y / self.machine.betagamma)
        x_kick = x_kick_in_sigmas * sigma_x
        y_kick = y_kick_in_sigmas * sigma_y
        for bunch in list_bunches:
            bunch.x += x_kick
            bunch.y += y_kick

        return list_bunches

    def init_start_ring(self):
        stats_to_store = [
            'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
            'sigma_x', 'sigma_y', 'sigma_z', 'sigma_dp', 'epsn_x', 'epsn_y',
            'epsn_z', 'macroparticlenumber', 'i_bunch', 'i_turn'
        ]

        # integer division: the monitor expects an integer number of steps
        n_stored_turns = len(filling_pattern) * (
            self.ring_of_CPUs.N_turns // self.ring_of_CPUs.N_parellel_rings +
            self.ring_of_CPUs.N_parellel_rings)

        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_monitor_ring%03d' % self.ring_of_CPUs.myring,
            n_stored_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=1,
            stats_to_store=stats_to_store)

    def perform_bunch_operations_at_start_ring(self, bunch):
        # Attach bound methods so the monitor can record i_bunch and i_turn
        # (in the future we might upgrade PyHEADTAIL to pass the lambda to the monitor)
        if bunch.macroparticlenumber > 0:
            bunch.i_bunch = types.MethodType(
                lambda self: self.slice_info['i_bunch'], bunch)
            bunch.i_turn = types.MethodType(
                lambda self: self.slice_info['i_turn'], bunch)
            self.bunch_monitor.dump(bunch)

    def slice_bunch_at_start_ring(self, bunch):
        list_slices = sl.slice_a_bunch(bunch, self.z_cut_slicing,
                                       self.n_slices_per_bunch)
        return list_slices

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def merge_slices_at_end_ring(self, list_slices):
        bunch = sl.merge_slices_into_bunch(list_slices)
        return bunch

    def perform_bunch_operations_at_end_ring(self, bunch):
        #finalize present turn (with non parallel part, e.g. synchrotron motion)
        if bunch.macroparticlenumber > 0:
            for ele in self.non_parallel_part:
                ele.track(bunch)

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
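The types.MethodType trick used in perform_bunch_operations_at_start_ring above can be isolated into a self-contained sketch; the Holder class and the dictionary contents are invented for illustration:

import types

class Holder(object):
    pass

obj = Holder()
obj.slice_info = {'i_bunch': 3, 'i_turn': 7}
# bind functions to this one instance so monitor-style code can call obj.i_bunch()
obj.i_bunch = types.MethodType(lambda self: self.slice_info['i_bunch'], obj)
obj.i_turn = types.MethodType(lambda self: self.slice_info['i_turn'], obj)
print(obj.i_bunch(), obj.i_turn())  # -> 3 7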
Example no. 17
class Simulation(object):
    def __init__(self):
        self.N_turns = pp.N_turns
        self.pp = pp

    def init_all(self):

        self.n_slices = pp.n_slices

        # read the optics if needed
        if pp.optics_pickle_file is not None:
            with open(pp.optics_pickle_file, 'rb') as fid:  # pickle files are binary
                optics = pickle.load(fid)
                self.n_kick_smooth = np.sum(
                    ['_kick_smooth_' in nn for nn in optics['name']])
        else:
            optics = None
            self.n_kick_smooth = pp.n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=pp.n_segments,
                           machine_configuration=pp.machine_configuration,
                           beta_x=pp.beta_x,
                           beta_y=pp.beta_y,
                           accQ_x=pp.Q_x,
                           accQ_y=pp.Q_y,
                           Qp_x=pp.Qp_x,
                           Qp_y=pp.Qp_y,
                           octupole_knob=pp.octupole_knob,
                           optics_dict=optics,
                           V_RF=pp.V_RF)
        self.n_segments = self.machine.transverse_map.n_segments

        # compute sigma
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x_inj = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                              self.machine.betagamma)
        sigma_y_inj = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                              self.machine.betagamma)

        if pp.optics_pickle_file is None:
            sigma_x_smooth = sigma_x_inj
            sigma_y_smooth = sigma_y_inj
        else:
            beta_x_smooth = None
            beta_y_smooth = None
            for ele in self.machine.one_turn_map:
                if ele in self.machine.transverse_map:
                    if '_kick_smooth_' in ele.name1:
                        if beta_x_smooth is None:
                            beta_x_smooth = ele.beta_x1
                            beta_y_smooth = ele.beta_y1
                        else:
                            if beta_x_smooth != ele.beta_x1 or beta_y_smooth != ele.beta_y1:
                                raise ValueError(
                                    'Smooth kicks must have all the same beta')

            if beta_x_smooth is None:
                sigma_x_smooth = None
                sigma_y_smooth = None
            else:
                sigma_x_smooth = np.sqrt(beta_x_smooth * pp.epsn_x /
                                         self.machine.betagamma)
                sigma_y_smooth = np.sqrt(beta_y_smooth * pp.epsn_y /
                                         self.machine.betagamma)

        # define MP size
        nel_mp_ref_0 = pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper / pp.N_MP_ele_init_dip

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT

        if pp.custom_target_grid_arcs is not None:
            target_grid_arcs = pp.custom_target_grid_arcs
        else:
            target_grid_arcs = {
                'x_min_target':
                -pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'x_max_target':
                pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'y_min_target':
                -pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'y_max_target':
                pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_x_smooth
            }
        self.target_grid_arcs = target_grid_arcs

        if pp.enable_arc_dip:
            ecloud_dip = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_dip,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                init_unif_edens_flag=pp.init_unif_edens_flag_dip,
                init_unif_edens=pp.init_unif_edens_dip,
                N_mp_max=pp.N_mp_max_dip,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_dip,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if pp.enable_arc_quad:
            ecloud_quad = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_quad,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                N_mp_max=pp.N_mp_max_quad,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_quad,
                filename_init_MP_state=pp.filename_init_MP_state_quad,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
            with open('multigrid_config_dip.txt', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_dip.pkl', 'wb') as fid:  # binary mode for pickle
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
            with open('multigrid_config_quad.txt', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_quad.pkl', 'wb') as fid:  # binary mode for pickle
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=pp.target_size_internal_grid_sigma * sigma_x_inj,
            y_aper=pp.target_size_internal_grid_sigma * sigma_y_inj)
        self.machine.one_turn_map.append(apt_xy)

        if pp.enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=pp.dampingrate_x,
                                      dampingrate_y=pp.dampingrate_y)
            self.machine.one_turn_map.append(damper)

        # We assume that all the objects that cannot be parallelized over slices are at the end of the ring
        i_end_parallel = len(
            self.machine.one_turn_map) - pp.n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print('I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print('I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

        # install e-clouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                if pp.optics_pickle_file is None or '_kick_smooth_' in ele.name1:
                    if pp.enable_arc_dip:
                        ecloud_dip_new = ecloud_dip.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_dip_new)
                        self.my_list_eclouds.append(ecloud_dip_new)
                    if pp.enable_arc_quad:
                        ecloud_quad_new = ecloud_quad.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_quad_new)
                        self.my_list_eclouds.append(ecloud_quad_new)
                elif '_kick_element_' in ele.name1 and pp.enable_eclouds_at_kick_elements:

                    i_in_optics = list(optics['name']).index(ele.name1)
                    kick_name = optics['name'][i_in_optics]
                    element_name = kick_name.split('_kick_element_')[-1]
                    L_curr = optics['L_interaction'][i_in_optics]

                    buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                        '!!!NAME!!!', element_name)
                    chamber_fname = '%s_chamber.mat' % (element_name)

                    B_multip_curr = [0., optics['gradB'][i_in_optics]]

                    x_beam_offset = optics['x'][i_in_optics] * pp.orbit_factor
                    y_beam_offset = optics['y'][i_in_optics] * pp.orbit_factor

                    sigma_x_local = np.sqrt(optics['beta_x'][i_in_optics] *
                                            pp.epsn_x / self.machine.betagamma)
                    sigma_y_local = np.sqrt(optics['beta_y'][i_in_optics] *
                                            pp.epsn_y / self.machine.betagamma)

                    ecloud_ele = PyEC4PyHT.Ecloud(
                        slice_by_slice_mode=True,
                        L_ecloud=L_curr,
                        slicer=None,
                        Dt_ref=pp.Dt_ref,
                        pyecl_input_folder=pp.pyecl_input_folder,
                        chamb_type='polyg',
                        x_aper=None,
                        y_aper=None,
                        filename_chm=buildup_folder + '/' + chamber_fname,
                        PyPICmode=pp.PyPICmode,
                        Dh_sc=pp.Dh_sc_ext,
                        N_min_Dh_main=pp.N_min_Dh_main,
                        f_telescope=pp.f_telescope,
                        N_nodes_discard=pp.N_nodes_discard,
                        target_grid={
                            'x_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'x_max_target':
                            pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'y_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'y_max_target':
                            pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'Dh_target':
                            pp.target_Dh_internal_grid_sigma * sigma_y_local
                        },
                        N_mp_max=pp.N_mp_max_quad,
                        nel_mp_ref_0=nel_mp_ref_0,
                        B_multip=B_multip_curr,
                        filename_init_MP_state=buildup_folder + '/' +
                        pp.name_MP_state_file_kick_elements,
                        x_beam_offset=x_beam_offset,
                        y_beam_offset=y_beam_offset,
                        enable_kick_x=pp.enable_kick_x,
                        enable_kick_y=pp.enable_kick_y)

                    my_new_part.append(ecloud_ele)
                    self.my_list_eclouds.append(ecloud_ele)

        self.mypart = my_new_part

        if pp.footprint_mode:
            print('Proc. %d computing maps' % myid)
            # generate a bunch
            bunch_for_map = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_map,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)

            # Slice the bunch
            slicer_for_map = UniformBinSlicer(n_slices=pp.n_slices,
                                              z_cuts=(-pp.z_cut, pp.z_cut))
            slices_list_for_map = bunch_for_map.extract_slices(slicer_for_map)

            #Track the previous part of the machine
            for ele in self.machine.one_turn_map[:i_start_part]:
                for ss in slices_list_for_map:
                    ele.track(ss)

            # Measure optics, track and replace clouds with maps
            list_ele_type = []
            list_meas_beta_x = []
            list_meas_alpha_x = []
            list_meas_beta_y = []
            list_meas_alpha_y = []
            for ele in self.mypart:
                list_ele_type.append(str(type(ele)))
                # Measure optics
                bbb = sum(slices_list_for_map)
                list_meas_beta_x.append(bbb.beta_Twiss_x())
                list_meas_alpha_x.append(bbb.alpha_Twiss_x())
                list_meas_beta_y.append(bbb.beta_Twiss_y())
                list_meas_alpha_y.append(bbb.alpha_Twiss_y())

                if ele in self.my_list_eclouds:
                    ele.track_once_and_replace_with_recorded_field_map(
                        slices_list_for_map)
                else:
                    for ss in slices_list_for_map:
                        ele.track(ss)
            print('Proc. %d done with maps' % myid)

            with open('measured_optics_%d.pkl' % myid, 'wb') as fid:
                pickle.dump(
                    {
                        'ele_type': list_ele_type,
                        'beta_x': list_meas_beta_x,
                        'alpha_x': list_meas_alpha_x,
                        'beta_y': list_meas_beta_y,
                        'alpha_y': list_meas_alpha_y,
                    }, fid)

            #remove RF
            if self.ring_of_CPUs.I_am_the_master:
                self.non_parallel_part.remove(self.machine.longitudinal_map)

    def init_master(self):

        # Manage multi-job operation
        if pp.footprint_mode:
            if pp.N_turns != pp.N_turns_target:
                raise ValueError(
                    'In footprint mode you need to set N_turns_target=N_turns_per_run!'
                )

        import PyPARIS_sim_class.Save_Load_Status as SLS
        SimSt = SLS.SimulationStatus(N_turns_per_run=pp.N_turns,
                                     check_for_resubmit=True,
                                     N_turns_target=pp.N_turns_target)
        SimSt.before_simulation()
        self.SimSt = SimSt

        # generate a bunch
        if pp.footprint_mode:
            self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_track,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)
        elif SimSt.first_run:

            if pp.bunch_from_file is not None:
                print('Loading bunch from file %s ...' % pp.bunch_from_file)
                with h5py.File(pp.bunch_from_file, 'r') as fid:
                    self.bunch = self.buffer_to_piece(
                        np.array(fid['bunch']).copy())
                print('Bunch loaded from file.\n')

            else:
                self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                    n_macroparticles=pp.n_macroparticles,
                    intensity=pp.intensity,
                    epsn_x=pp.epsn_x,
                    epsn_y=pp.epsn_y,
                    sigma_z=pp.sigma_z)

                # compute initial displacements
                inj_opt = self.machine.transverse_map.get_injection_optics()
                sigma_x = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                                  self.machine.betagamma)
                sigma_y = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                                  self.machine.betagamma)
                x_kick = pp.x_kick_in_sigmas * sigma_x
                y_kick = pp.y_kick_in_sigmas * sigma_y

                # apply initial displacement
                if not pp.footprint_mode:
                    self.bunch.x += x_kick
                    self.bunch.y += y_kick

                print('Bunch initialized.')
        else:
            print('Loading bunch from file...')
            with h5py.File('bunch_status_part%02d.h5' %
                           (SimSt.present_simulation_part - 1), 'r') as fid:
                self.bunch = self.buffer_to_piece(
                    np.array(fid['bunch']).copy())
            print('Bunch loaded from file.')

        # initial slicing
        self.slicer = UniformBinSlicer(n_slices=pp.n_slices,
                                       z_cuts=(-pp.z_cut, pp.z_cut))

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        # define a slice monitor
        from PyHEADTAIL.monitors.monitors import SliceMonitor
        self.slice_monitor = SliceMonitor(
            'slice_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns,
            self.slicer, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        #slice for the first turn
        slice_obj_list = self.bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        if pp.footprint_mode:
            self.recorded_particles = ParticleTrajectories(
                pp.n_macroparticles_for_footprint_track, self.N_turns)

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        self.bunch = sum(pieces_treated)

        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(self.bunch)

        # save results
        #print '%s Turn %d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(self.bunch)
        self.slice_monitor.dump(self.bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = self.bunch.extract_slices(self.slicer)

        # order reset of all clouds
        orders_to_pass = ['reset_clouds']

        if pp.footprint_mode:
            self.recorded_particles.dump(self.bunch)

        # check if simulation has to be stopped
        # 1. for beam losses
        if not pp.footprint_mode and self.bunch.macroparticlenumber < pp.sim_stop_frac * pp.n_macroparticles:
            orders_to_pass.append('stop')
            self.SimSt.check_for_resubmit = False
            print('Stop simulation due to beam losses.')

        # 2. for the emittance growth
        if pp.flag_check_emittance_growth:
            epsn_x_max = (pp.epsn_x) * (1 + pp.epsn_x_max_growth_fraction)
            epsn_y_max = (pp.epsn_y) * (1 + pp.epsn_y_max_growth_fraction)
            if not pp.footprint_mode and (self.bunch.epsn_x() > epsn_x_max
                                          or self.bunch.epsn_y() > epsn_y_max):
                orders_to_pass.append('stop')
                self.SimSt.check_for_resubmit = False
                print('Stop simulation due to emittance growth.')

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        if pp.footprint_mode:
            # Tunes

            import NAFFlib
            print('NAFFlib spectral analysis...')
            qx_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            qy_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            for ii in range(len(qx_i)):
                qx_i[ii] = NAFFlib.get_tune(self.recorded_particles.x_i[ii] +
                                            1j *
                                            self.recorded_particles.xp_i[ii])
                qy_i[ii] = NAFFlib.get_tune(self.recorded_particles.y_i[ii] +
                                            1j *
                                            self.recorded_particles.yp_i[ii])
            print('NAFFlib spectral analysis done.')

            # Save
            import h5py
            dict_beam_status = {
                'x_init': np.squeeze(self.recorded_particles.x_i[:, 0]),
                'xp_init': np.squeeze(self.recorded_particles.xp_i[:, 0]),
                'y_init': np.squeeze(self.recorded_particles.y_i[:, 0]),
                'yp_init': np.squeeze(self.recorded_particles.yp_i[:, 0]),
                'z_init': np.squeeze(self.recorded_particles.z_i[:, 0]),
                'qx_i': qx_i,
                'qy_i': qy_i,
                'x_centroid': np.mean(self.recorded_particles.x_i, axis=1),
                'y_centroid': np.mean(self.recorded_particles.y_i, axis=1)}

            with h5py.File('footprint.h5', 'w') as fid:
                for kk in dict_beam_status.keys():
                    fid[kk] = dict_beam_status[kk]
        else:
            # save data for multi-job operation and launch a new job
            import h5py
            with h5py.File(
                    'bunch_status_part%02d.h5' %
                (self.SimSt.present_simulation_part), 'w') as fid:
                fid['bunch'] = self.piece_to_buffer(self.bunch)
            if not self.SimSt.first_run:
                os.system('rm bunch_status_part%02d.h5' %
                          (self.SimSt.present_simulation_part - 1))
            self.SimSt.after_simulation()

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
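
The init_master above checkpoints the bunch between consecutive jobs by writing the serialized buffer to an HDF5 file and reading it back at the start of the next part. A condensed sketch of that round trip (illustrative; sim stands for the Simulation instance):

# Condensed checkpoint sketch (illustrative; sim is the Simulation instance)
import h5py
import numpy as np

def save_checkpoint(sim, part):
    with h5py.File('bunch_status_part%02d.h5' % part, 'w') as fid:
        fid['bunch'] = sim.piece_to_buffer(sim.bunch)

def load_checkpoint(sim, part):
    with h5py.File('bunch_status_part%02d.h5' % part, 'r') as fid:
        return sim.buffer_to_piece(np.array(fid['bunch']).copy())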
Example no. 19
class Simulation(object):
    def __init__(self):
        self.N_turns = 2000 
        self.N_buffer_float_size = 10000000
        self.N_buffer_int_size = 20
        self.N_parellel_rings = 4
        
        self.n_slices_per_bunch = 200
        self.z_cut_slicing = 3*sigma_z_bunch
        self.N_pieces_per_transfer = 300
        

    def init_all(self):
        
        print('Exec init...')
        
        self.ring_of_CPUs.verbose = verbose

        from LHC_custom import LHC
        self.machine = LHC(n_segments = n_segments, machine_configuration = machine_configuration,
                        Qp_x=Qp_x, Qp_y=Qp_y)
        print('Expected Qs = %.3e'%self.machine.Q_s)
        self.n_non_parallelizable = 1 #RF

        inj_optics = self.machine.transverse_map.get_injection_optics()
        sigma_x_smooth = np.sqrt(inj_optics['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y_smooth = np.sqrt(inj_optics['beta_y']*epsn_y/self.machine.betagamma)

        if flag_aperture:
            # setup transverse losses (to "protect" the ecloud)
            import PyHEADTAIL.aperture.aperture as aperture
            apt_xy = aperture.EllipticalApertureXY(x_aper=target_size_internal_grid_sigma*sigma_x_smooth, 
                                                   y_aper=target_size_internal_grid_sigma*sigma_x_smooth)
            self.machine.one_turn_map.append(apt_xy)
            self.n_non_parallelizable +=1 

        if enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=dampingrate_x, dampingrate_y=dampingrate_y)
            self.machine.one_turn_map.append(damper)
            self.n_non_parallelizable +=1
            
        # split the machine
        i_end_parallel = len(self.machine.one_turn_map)-self.n_non_parallelizable
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes_per_ring)
        i_start_part, i_end_part = sharing.my_part(self.ring_of_CPUs.myid_in_ring)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

        if self.ring_of_CPUs.I_am_at_end_ring:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print('Hello, I am %d.%d, my part looks like: %s.'%(self.ring_of_CPUs.myring, self.ring_of_CPUs.myid_in_ring, self.mypart))
            


        
    def init_master(self):
        
        print('Building the beam!')
        
        from scipy.constants import c as clight, e as qe
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        
        import gen_multibunch_beam as gmb
        list_bunches = gmb.gen_matched_multibunch_beam(self.machine, macroparticlenumber, filling_pattern, b_spac_s, 
                bunch_intensity, epsn_x, epsn_y, sigma_z, non_linear_long_matching, min_inten_slice4EC)
                
        if pickle_beam:
            import pickle
            with open('init_beam.pkl', 'wb') as fid:  # binary mode for pickle
                pickle.dump({'list_bunches': list_bunches}, fid)

        # compute and apply initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y']*epsn_y/self.machine.betagamma)
        x_kick = x_kick_in_sigmas*sigma_x
        y_kick = y_kick_in_sigmas*sigma_y
        for bunch in list_bunches:
            bunch.x += x_kick
            bunch.y += y_kick
            bunch.z += z_kick_in_m


        return list_bunches

    def init_start_ring(self):
        stats_to_store = [
         'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
         'sigma_x', 'sigma_y', 'sigma_z','sigma_dp', 'epsn_x', 'epsn_y',
         'epsn_z', 'macroparticlenumber',
         'i_bunch', 'i_turn']

        # integer division keeps n_stored_turns an int (matches the Python 2 behavior)
        n_stored_turns = len(filling_pattern)*(self.ring_of_CPUs.N_turns//self.ring_of_CPUs.N_parellel_rings + self.ring_of_CPUs.N_parellel_rings)

        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_monitor_ring%03d'%self.ring_of_CPUs.myring,
                            n_stored_turns, 
                            {'Comment':'PyHDTL simulation'}, 
                            write_buffer_every = 1,
                            stats_to_store = stats_to_store)

    def perform_bunch_operations_at_start_ring(self, bunch):
        # Attach bound methods so the monitor can store i_bunch and i_turn
        # (In the future we might upgrade PyHEADTAIL to pass the lambda to the monitor)
        if bunch.macroparticlenumber>0:
            bunch.i_bunch = types.MethodType(lambda self: self.slice_info['i_bunch'], bunch)
            bunch.i_turn = types.MethodType(lambda self: self.slice_info['i_turn'], bunch)
            self.bunch_monitor.dump(bunch)        

    def slice_bunch_at_start_ring(self, bunch):
        list_slices = sl.slice_a_bunch(bunch, self.z_cut_slicing, self.n_slices_per_bunch)
        return list_slices

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)
        
    def merge_slices_at_end_ring(self, list_slices):
        bunch = sl.merge_slices_into_bunch(list_slices)
        return bunch

    def perform_bunch_operations_at_end_ring(self, bunch):
        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        if bunch.macroparticlenumber>0:
            for ele in self.non_parallel_part:
                ele.track(bunch)


    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
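
One turn of the ring-of-CPUs flow above is: slice each bunch at the start of the ring, track the slices piecewise, then merge them back and apply the non-parallel elements. A schematic of that pass (illustrative; it only calls the methods defined above):

# Schematic single-ring pass (illustrative; uses only the methods defined above)
def one_ring_pass(sim, bunch):
    list_slices = sim.slice_bunch_at_start_ring(bunch)  # sl.slice_a_bunch(...)
    for piece in list_slices:
        sim.treat_piece(piece)                          # track through sim.mypart
    bunch = sim.merge_slices_at_end_ring(list_slices)   # sl.merge_slices_into_bunch(...)
    sim.perform_bunch_operations_at_end_ring(bunch)     # non-parallel elements, e.g. RF
    return bunch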
Example no. 20
def run(job_id, accQ_y):
    it = job_id

    # SIMULATION PARAMETERS
    # =====================

    # Simulation parameters
    n_turns = 10000
    n_macroparticles = 100000  # per bunch

    # MACHINE PARAMETERS
    # ==================

    intensity = 1e13  # protons
    #    intensity = 2*4e12 # protons
    E0 = 71e6  # Kinetic energy [eV]
    p0 = np.sqrt((m_p_MeV + E0)**2 - m_p_MeV**2) * e / c

    print('Beam kinetic energy: ' + str(E0 * 1e-6) + ' MeV')
    print('Beam momentum: ' + str(p0 * 1e-6 * c / e) + ' MeV/c')

    accQ_x = 4.31  # Horizontal tune
    #    accQ_y = 3.80 # Vertical tune is an input argument

    chroma = -1.4  # Chromaticity

    alpha = 5.034**-2  # momentum compaction factor

    circumference = 160.  # [meters]

    # Approximate average beta functions (used to normalize the lumped wakes)
    beta_x = circumference / (2. * np.pi * accQ_x)
    beta_y = circumference / (2. * np.pi * accQ_y)

    # Harmonic number for RF
    h_RF = [2]  # a list of harmonic number for RF
    V_RF = [5e3 * 2]  # a list of RF voltages
    p_increment = 0.
    dphi_RF = [np.pi]  # a list of RF phases
    #    dphi_RF = 0.

    # The 'non-linear' longitudinal mode includes the RF directly; the 'linear'
    # mode would require the synchrotron tune Q_s instead
    longitudinal_mode = 'non-linear'
    #    Q_s=0.02 # Longitudinal tune

    optics_mode = 'smooth'
    n_segments = 1
    s = None
    alpha_x = None
    alpha_y = None
    D_x = 0
    D_y = 0
    charge = e
    mass = m_p
    name = None
    app_x = 0
    app_y = 0
    app_xy = 0

    # Create the PyHEADTAIL object for the synchrotron
    machine = Synchrotron(optics_mode=optics_mode,
                          circumference=circumference,
                          n_segments=n_segments,
                          s=s,
                          name=name,
                          alpha_x=alpha_x,
                          beta_x=beta_x,
                          D_x=D_x,
                          alpha_y=alpha_y,
                          beta_y=beta_y,
                          D_y=D_y,
                          accQ_x=accQ_x,
                          accQ_y=accQ_y,
                          Qp_x=chroma,
                          Qp_y=chroma,
                          app_x=app_x,
                          app_y=app_y,
                          app_xy=app_xy,
                          alpha_mom_compaction=alpha,
                          longitudinal_mode=longitudinal_mode,
                          h_RF=np.atleast_1d(h_RF),
                          V_RF=np.atleast_1d(V_RF),
                          dphi_RF=np.atleast_1d(dphi_RF),
                          p0=p0,
                          p_increment=p_increment,
                          charge=charge,
                          mass=mass)

    print('')
    print('machine.beta: ')
    print(machine.beta)
    print('')

    print('')
    print('machine.gamma: ')
    print(machine.gamma)
    print('')

    epsn_x = 300e-6
    epsn_y = 300e-6
    sigma_z = 15  # bunch length in meters to be matched to the bucket

    # Creates transverse macroparticle distribution
    allbunches = machine.generate_6D_Gaussian_bunch(n_macroparticles,
                                                    intensity, epsn_x, epsn_y,
                                                    sigma_z)

    # Creates longitudinal macroparticle distribution
    rfb = RFBucket(circumference, machine.gamma, m_p, e, [alpha], 0., h_RF,
                   V_RF, dphi_RF)
    rfb_matcher = RFBucketMatcher(rfb, WaterbagDistribution, sigma_z=sigma_z)

    rfb_matcher.integrationmethod = 'cumtrapz'

    z, dp, _, _ = rfb_matcher.generate(n_macroparticles)
    np.copyto(allbunches.z, z)
    np.copyto(allbunches.dp, dp)
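
    # Optional sanity check (illustrative, not part of the original script):
    # the matched rms bunch length should come out close to the requested sigma_z.
    print('Matched sigma_z = %.2f m (requested %.2f m)'
          % (allbunches.sigma_z(), sigma_z))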

    # Slicer object, which is used for the wake fields and the slice monitors
    slicer = UniformBinSlicer(75, z_cuts=(-2. * sigma_z, 2. * sigma_z))

    # WAKE FIELDS
    # ===========

    # Length of the wake function (wake memory) in turns
    n_turns_wake = 150

    # Parameters for a resonator. The frequency is given in units of the
    # revolution frequency as (mode - Q_frac), where
    #       mode: integer coupled-bunch mode number (1 matches the observations)
    #       Q_frac: fractional part of the resonance tune

    f_r = (1 - 0.83) * 1. / (circumference / (c * machine.beta))
    Q = 15
    R = 1.0e6

    # Resonator wake object, which is added to the one-turn map
    wakes = CircularResonator(R, f_r, Q, n_turns_wake=n_turns_wake)
    wake_field = WakeField(slicer, wakes)
    machine.one_turn_map.append(wake_field)
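
    # Illustrative cross-check (not part of the original script): f_r above
    # equals (mode - Q_frac) * f_rev with mode = 1 and Q_frac = 0.83, where
    # f_rev = beta * c / circumference is the revolution frequency.
    f_rev = machine.beta * c / circumference
    assert np.isclose(f_r, (1. - 0.83) * f_rev)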

    # CREATE MONITORS
    # ===============
    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': accQ_x,
        'Qy': accQ_y,
        # 'Qs': Q_s,
        'beta_x': beta_x,
        'beta_y': beta_y,
        # 'beta_z': bucket.beta_z,
        'epsn_x': epsn_x,
        'epsn_y': epsn_y,
        'sigma_z': sigma_z,
    }
    # The bunch monitor stores turn-by-turn average positions for all the bunches
    bunchmonitor = BunchMonitor(outputpath + '/bunchmonitor_{:04d}'.format(it),
                                n_turns,
                                simulation_parameters_dict,
                                write_buffer_every=32,
                                buffer_size=32)

    # The slice monitor saves slice-by-slice data for each bunch
    slicemonitor = SliceMonitor(outputpath +
                                '/slicemonitor_{:01d}_{:04d}'.format(0, it),
                                60,
                                slicer,
                                simulation_parameters_dict,
                                write_buffer_every=60,
                                buffer_size=60)

    # Counter for the number of turns stored by the slice monitor
    s_cnt = 0

    # TRACKING LOOP
    # =============
    monitor_active = False
    print('\n--> Begin tracking...\n')

    for i in range(n_turns):
        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8

        # Tracks beam through the one turn map simulation map
        machine.track(allbunches)

        # Stores bunch mean coordinate values
        bunchmonitor.dump(allbunches)

        # If the total oscillation amplitude of the bunches exceeds the
        # threshold, or the simulation is in its last turns, activate the
        # slice monitor to record head-tail motion data
        if (allbunches.mean_x() > 1e-1 or allbunches.mean_y() > 1e-1 or i >
            (n_turns - 64)):
            monitor_active = True

        # Save slice monitor data while the monitor is active and fewer than
        # 64 turns have been stored
        if monitor_active and s_cnt < 64:
            slicemonitor.dump(allbunches)
            s_cnt += 1
        elif s_cnt == 64:
            break

        # Every 100 turns, print the current bunch coordinates, emittances
        # and the time spent on the last turn
        if (i % 100 == 0):
            print(
                '{:4d} \t {:+3e} \t {:+3e} \t {:+3e} \t {:3e} \t {:3e} \t {:3f} \t {:3f} \t {:3f} \t {:3s}'
                .format(i, allbunches.mean_x(), allbunches.mean_y(),
                        allbunches.mean_z(), allbunches.epsn_x(),
                        allbunches.epsn_y(), allbunches.epsn_z(),
                        allbunches.sigma_z(), allbunches.sigma_dp(),
                        str(time.perf_counter() - t0)))
Example no. 22
class Simulation(object):
    def __init__(self):
        self.N_turns = 10000
        self.N_buffer_float_size = 10000000
        self.N_buffer_int_size = 20
        self.N_parellel_rings = 2
        
        self.n_slices_per_bunch = 200
        self.z_cut_slicing = 3*sigma_z_bunch
        self.N_pieces_per_transfer = 300
        

    def init_all(self):
        
        print('Exec init...')
        
        self.ring_of_CPUs.verbose = verbose

        from LHC_custom import LHC
        self.machine = LHC(n_segments = n_segments, machine_configuration = machine_configuration,
                        Qp_x=Qp_x, Qp_y=Qp_y)
        self.n_non_parallelizable = 1 #RF

        inj_optics = self.machine.transverse_map.get_injection_optics()
        sigma_x_smooth = np.sqrt(inj_optics['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y_smooth = np.sqrt(inj_optics['beta_y']*epsn_y/self.machine.betagamma)

        if flag_aperture:
            # setup transverse losses (to "protect" the ecloud)
            import PyHEADTAIL.aperture.aperture as aperture
            apt_xy = aperture.EllipticalApertureXY(x_aper=target_size_internal_grid_sigma*sigma_x_smooth, 
                                                   y_aper=target_size_internal_grid_sigma*sigma_x_smooth)
            self.machine.one_turn_map.append(apt_xy)
            self.n_non_parallelizable +=1 

        if enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=dampingrate_x, dampingrate_y=dampingrate_y)
            self.machine.one_turn_map.append(damper)
            self.n_non_parallelizable +=1
            
        if enable_ecloud:
            print('Build ecloud...')
            import PyECLOUD.PyEC4PyHT as PyEC4PyHT
            ecloud = PyEC4PyHT.Ecloud(
                    L_ecloud=L_ecloud_tot/n_segments, slicer=None, slice_by_slice_mode=True,
                    Dt_ref=5e-12, pyecl_input_folder='./pyecloud_config',
                    chamb_type = 'polyg' ,
                    filename_chm= 'LHC_chm_ver.mat', 
                    #init_unif_edens_flag=1,
                    #init_unif_edens=1e7,
                    #N_mp_max = 3000000,
                    #nel_mp_ref_0 = 1e7/(0.7*3000000),
                    #B_multip = [0.],
                    #~ PyPICmode = 'ShortleyWeller_WithTelescopicGrids',
                    #~ f_telescope = 0.3,
                    target_grid = {'x_min_target':-target_size_internal_grid_sigma*sigma_x_smooth, 'x_max_target':target_size_internal_grid_sigma*sigma_x_smooth,
                                   'y_min_target':-target_size_internal_grid_sigma*sigma_y_smooth,'y_max_target':target_size_internal_grid_sigma*sigma_y_smooth,
                                   'Dh_target':.2*sigma_x_smooth},
                    #~ N_nodes_discard = 10.,
                    #~ N_min_Dh_main = 10,
                    #x_beam_offset = x_beam_offset,
                    #y_beam_offset = y_beam_offset,
                    #probes_position = probes_position,
                    save_pyecl_outp_as = 'cloud_evol_ring%d'%self.ring_of_CPUs.myring,
                    sparse_solver = 'PyKLU', enable_kick_x=enable_kick_x, enable_kick_y=enable_kick_y)
            print('Done.')



        # split the machine
        i_end_parallel = len(self.machine.one_turn_map)-self.n_non_parallelizable
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes_per_ring)
        i_start_part, i_end_part = sharing.my_part(self.ring_of_CPUs.myid_in_ring)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

        if self.ring_of_CPUs.I_am_at_end_ring:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            

        #install eclouds in my part
        if enable_ecloud:
            my_new_part = []
            self.my_list_eclouds = []
            for ele in self.mypart:
                if ele in self.machine.transverse_map:
                    ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                    
                    # we save buildup info only for the first cloud in each ring
                    if self.ring_of_CPUs.myid_in_ring>0 or len(self.my_list_eclouds)>0:
                        ecloud_new.remove_savers()
                    
                    my_new_part.append(ecloud_new)
                    self.my_list_eclouds.append(ecloud_new)
                my_new_part.append(ele)

            self.mypart = my_new_part
            
            print('Hello, I am %d.%d, my part looks like: %s. Saver status: %s'%(self.ring_of_CPUs.myring, self.ring_of_CPUs.myid_in_ring, self.mypart, [(ec.cloudsim.cloud_list[0].pyeclsaver is not None) for ec in self.my_list_eclouds]))
            


        
    def init_master(self):
        
        print('Building the beam!')
        
        from scipy.constants import c as clight, e as qe
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        
        import PyPARIS.gen_multibunch_beam as gmb
        list_bunches = gmb.gen_matched_multibunch_beam(self.machine, macroparticlenumber, filling_pattern, b_spac_s, 
                bunch_intensity, epsn_x, epsn_y, sigma_z, non_linear_long_matching, min_inten_slice4EC)
                
        if pickle_beam:
            import pickle
            with open('init_beam.pkl', 'wb') as fid:  # binary mode for pickle
                pickle.dump({'list_bunches': list_bunches}, fid)

        # compute and apply initial displacements
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x = np.sqrt(inj_opt['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y = np.sqrt(inj_opt['beta_y']*epsn_y/self.machine.betagamma)
        x_kick = x_kick_in_sigmas*sigma_x
        y_kick = y_kick_in_sigmas*sigma_y
        for bunch in list_bunches:
            bunch.x += x_kick
            bunch.y += y_kick


        return list_bunches

    def init_start_ring(self):
        stats_to_store = [
         'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
         'sigma_x', 'sigma_y', 'sigma_z','sigma_dp', 'epsn_x', 'epsn_y',
         'epsn_z', 'macroparticlenumber',
         'i_bunch', 'i_turn']

        # integer division keeps n_stored_turns an int (matches the Python 2 behavior)
        n_stored_turns = len(filling_pattern)*(self.ring_of_CPUs.N_turns//self.ring_of_CPUs.N_parellel_rings + self.ring_of_CPUs.N_parellel_rings)

        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor('bunch_monitor_ring%03d'%self.ring_of_CPUs.myring,
                            n_stored_turns, 
                            {'Comment':'PyHDTL simulation'}, 
                            write_buffer_every = 1,
                            stats_to_store = stats_to_store)

    def perform_bunch_operations_at_start_ring(self, bunch):
        # Attach bound methods so the monitor can store i_bunch and i_turn
        # (In the future we might upgrade PyHEADTAIL to pass the lambda to the monitor)
        if bunch.macroparticlenumber>0:
            bunch.i_bunch = types.MethodType(lambda self: self.slice_info['i_bunch'], bunch)
            bunch.i_turn = types.MethodType(lambda self: self.slice_info['i_turn'], bunch)
            self.bunch_monitor.dump(bunch)        

    def slice_bunch_at_start_ring(self, bunch):
        list_slices = sl.slice_a_bunch(bunch, self.z_cut_slicing, self.n_slices_per_bunch)
        return list_slices

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)
        
    def merge_slices_at_end_ring(self, list_slices):
        bunch = sl.merge_slices_into_bunch(list_slices)
        return bunch

    def perform_bunch_operations_at_end_ring(self, bunch):
        # finalize present turn (with the non-parallel part, e.g. synchrotron motion)
        if bunch.macroparticlenumber>0:
            for ele in self.non_parallel_part:
                ele.track(bunch)


    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
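
All of the examples above configure BunchMonitor with an explicit stats_to_store list so that the bookkeeping indices i_bunch and i_turn are written alongside the usual beam moments. A minimal standalone sketch of that pattern (file name and turn count are arbitrary illustrative values):

# Minimal BunchMonitor usage sketch (illustrative values)
from PyHEADTAIL.monitors.monitors import BunchMonitor

monitor = BunchMonitor('bunch_monitor_demo', 1000,
                       {'Comment': 'PyHDTL simulation'},
                       write_buffer_every=1,
                       stats_to_store=['mean_x', 'mean_y', 'epsn_x', 'epsn_y',
                                       'macroparticlenumber',
                                       'i_bunch', 'i_turn'])
# 'i_bunch' and 'i_turn' are only stored if the bunch exposes them as
# callables, e.g. via the types.MethodType trick used in
# perform_bunch_operations_at_start_ring above; then, once per turn:
# monitor.dump(bunch)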

class Simulation(object):
    def __init__(self):
        self.N_turns = 576
        self.N_buffer_float_size = 10000000
        self.N_buffer_int_size = 20
        self.N_parellel_rings = 96
        
        self.n_slices_per_bunch = 200
        self.z_cut_slicing = 3*sigma_z_bunch
        self.N_pieces_per_transfer = 300
        self.verbose = False
        self.mpi_verbose = False
        self.enable_barriers = True
        self.save_beam_at_turns = []

    def init_all(self):
        
        print('Exec init...')
        
        from LHC_custom import LHC
        self.machine = LHC(n_segments = n_segments, machine_configuration = machine_configuration,
                        Qp_x=Qp_x, Qp_y=Qp_y,
                        octupole_knob=octupole_knob)
        self.n_non_parallelizable = 1 #RF

        inj_optics = self.machine.transverse_map.get_injection_optics()
        sigma_x_smooth = np.sqrt(inj_optics['beta_x']*epsn_x/self.machine.betagamma)
        sigma_y_smooth = np.sqrt(inj_optics['beta_y']*epsn_y/self.machine.betagamma)

        if flag_aperture:
            # setup transverse losses (to "protect" the ecloud)
            import PyHEADTAIL.aperture.aperture as aperture
            apt_xy = aperture.EllipticalApertureXY(x_aper=target_size_internal_grid_sigma*sigma_x_smooth, 
                                                   y_aper=target_size_internal_grid_sigma*sigma_x_smooth)
            self.machine.one_turn_map.append(apt_xy)
            self.n_non_parallelizable +=1 

        if enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=dampingrate_x, dampingrate_y=dampingrate_y)
            self.machine.one_turn_map.append(damper)
            self.n_non_parallelizable +=1
            
        if enable_ecloud:
            print('Build ecloud...')
            import PyECLOUD.PyEC4PyHT as PyEC4PyHT
            ecloud = PyEC4PyHT.Ecloud(
                    L_ecloud=L_ecloud_tot/n_segments, slicer=None, slice_by_slice_mode=True,
                    Dt_ref=5e-12, pyecl_input_folder='./pyecloud_config',
                    chamb_type = 'polyg' ,
                    filename_chm= 'LHC_chm_ver.mat', 
                    #init_unif_edens_flag=1,
                    #init_unif_edens=1e7,
                    #N_mp_max = 3000000,
                    #nel_mp_ref_0 = 1e7/(0.7*3000000),
                    #B_multip = [0.],
                    #~ PyPICmode = 'ShortleyWeller_WithTelescopicGrids',
                    #~ f_telescope = 0.3,
                    target_grid = {'x_min_target':-target_size_internal_grid_sigma*sigma_x_smooth, 'x_max_target':target_size_internal_grid_sigma*sigma_x_smooth,
                                   'y_min_target':-target_size_internal_grid_sigma*sigma_y_smooth,'y_max_target':target_size_internal_grid_sigma*sigma_y_smooth,
                                   'Dh_target':.2*sigma_x_smooth},
                    #~ N_nodes_discard = 10.,
                    #~ N_min_Dh_main = 10,
                    #x_beam_offset = x_beam_offset,
                    #y_beam_offset = y_beam_offset,
                    #probes_position = probes_position,
                    save_pyecl_outp_as = 'cloud_evol_ring%d'%self.ring_of_CPUs.myring,
                    save_only = ['lam_t_array', 'nel_hist', 'Nel_timep', 't', 't_hist', 'xg_hist'],
                    sparse_solver = 'PyKLU', enable_kick_x=enable_kick_x, enable_kick_y=enable_kick_y)
            print('Done.')



        # split the machine
        i_end_parallel = len(self.machine.one_turn_map)-self.n_non_parallelizable
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes_per_ring)
        i_start_part, i_end_part = sharing.my_part(self.ring_of_CPUs.myid_in_ring)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]

        if self.ring_of_CPUs.I_am_at_end_ring:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            

        #install eclouds in my part
        if enable_ecloud:
            my_new_part = []
            self.my_list_eclouds = []
            for ele in self.mypart:
                if ele in self.machine.transverse_map:
                    ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
                    
                    # we save buildup info only for the first cloud in each ring
                    if self.ring_of_CPUs.myid_in_ring>0 or len(self.my_list_eclouds)>0:
                        ecloud_new.remove_savers()
                    
                    my_new_part.append(ecloud_new)
                    self.my_list_eclouds.append(ecloud_new)
                my_new_part.append(ele)

            self.mypart = my_new_part
            
            print('Hello, I am %d.%d, my part looks like: %s. Saver status: %s'%(
                self.ring_of_CPUs.myring, self.ring_of_CPUs.myid_in_ring, self.mypart, 
                [(ec.cloudsim.cloud_list[0].pyeclsaver is not None) for ec in self.my_list_eclouds]))
            
       
    def init_master(self):
        
        import PyPARIS.gen_multibunch_beam as gmb
        from scipy.constants import c as clight, e as qe
        from PyHEADTAIL.particles.slicing import UniformBinSlicer
        
        # Manage multi-run operation
        import Save_Load_Status as SLS
        SimSt = SLS.SimulationStatus(N_turns_per_run=self.N_turns,
                check_for_resubmit = False, N_turns_target=N_turns_target)
        SimSt.before_simulation()
        self.SimSt = SimSt

        if SimSt.first_run:
            if load_beam_from_folder is None:
                print('Building the beam!')
                list_bunches = gmb.gen_matched_multibunch_beam(self.machine, macroparticlenumber, filling_pattern, b_spac_s, 
                    bunch_intensity, epsn_x, epsn_y, sigma_z, non_linear_long_matching, min_inten_slice4EC)
                # compute and apply initial displacements
                inj_opt = self.machine.transverse_map.get_injection_optics()
                sigma_x = np.sqrt(inj_opt['beta_x']*epsn_x/self.machine.betagamma)
                sigma_y = np.sqrt(inj_opt['beta_y']*epsn_y/self.machine.betagamma)
                x_kick = x_kick_in_sigmas*sigma_x
                y_kick = y_kick_in_sigmas*sigma_y
                for bunch in list_bunches:
                    bunch.x += x_kick
                    bunch.y += y_kick
            else:
                # Load based on input
                list_bunches = gmb.load_multibunch_beam(load_beam_from_folder)
        else:
            # Load from previous run
            print('Loading beam from file...')
            dirname = 'beam_status_part%02d'%(SimSt.present_simulation_part-1)
            list_bunches = gmb.load_multibunch_beam(dirname)
            print('Loaded beam from file.')
        
        for bb in list_bunches:
            bb.slice_info['simstate_part'] = self.SimSt.present_simulation_part

        return list_bunches
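
    # Note on multi-run chaining: each run saves the beam into a
    # 'beam_status_part%02d' folder at its last turn (see
    # perform_bunch_operations_at_start_ring below) and the next run reloads
    # it here, so a long simulation can be split into a chain of short jobs.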


    def init_start_ring(self):
        self.bunch_monitor = None
    
    def perform_bunch_operations_at_start_ring(self, bunch):
        
        if self.bunch_monitor is None:
            
            simstate_part = bunch.slice_info['simstate_part']
            stats_to_store = [
             'mean_x', 'mean_xp', 'mean_y', 'mean_yp', 'mean_z', 'mean_dp',
             'sigma_x', 'sigma_y', 'sigma_z','sigma_dp', 'epsn_x', 'epsn_y',
             'epsn_z', 'macroparticlenumber',
             'i_bunch', 'i_turn']

            n_stored_turns = np.sum(np.array(filling_pattern) > 0)*(
                self.ring_of_CPUs.N_turns//self.ring_of_CPUs.N_parellel_rings
                + self.ring_of_CPUs.N_parellel_rings)

            from PyHEADTAIL.monitors.monitors import BunchMonitor
            self.bunch_monitor = BunchMonitor(
                                'bunch_monitor_part%03d_ring%03d'%(
                                    simstate_part, self.ring_of_CPUs.myring),
                                n_stored_turns, 
                                {'Comment':'PyHDTL simulation'}, 
                                write_buffer_every = 1,
                                stats_to_store = stats_to_store)

            # define a slice monitor 
            z_left = bunch.slice_info['z_bin_left'] - bunch.slice_info['z_bin_center']
            z_right = bunch.slice_info['z_bin_right'] - bunch.slice_info['z_bin_center']
            slicer = UniformBinSlicer(n_slices = self.n_slices_per_bunch, z_cuts=(z_left, z_right))
            from PyHEADTAIL.monitors.monitors import SliceMonitor
            self.slice_monitor = SliceMonitor('slice_monitor_part%03d_ring%03d'%(
                simstate_part, self.ring_of_CPUs.myring),
                n_stored_turns, slicer,  {'Comment':'PyHDTL simulation'}, 
                write_buffer_every = 1, bunch_stats_to_store=stats_to_store,
                slice_stats_to_store='mean_x mean_y mean_z n_macroparticles_per_slice'.split())
        
    

        # Save bunch properties
        if bunch.macroparticlenumber > 0 and bunch.slice_info['i_turn'] < self.N_turns:
            # Attach i_bunch and i_turn as bound methods: the monitors call
            # each entry of stats_to_store as a method on the bunch, so these
            # lambdas expose the slice_info entries in that form.
            bunch.i_bunch = types.MethodType(lambda ss: ss.slice_info['i_bunch'], bunch)
            bunch.i_turn = types.MethodType(lambda ss: ss.slice_info['i_turn'], bunch)
            self.bunch_monitor.dump(bunch)

            # Monitor slice wrt bunch center
            bunch.z -= bunch.slice_info['z_bin_center']
            self.slice_monitor.dump(bunch)
            bunch.z += bunch.slice_info['z_bin_center']

        # Save full beam at user-defined positions
        if bunch.slice_info['i_turn'] in self.save_beam_at_turns:
            dirname = 'bunch_states_turn%d'%bunch.slice_info['i_turn']
            import PyPARIS.gen_multibunch_beam as gmb
            gmb.save_bunch_to_folder(bunch, dirname)

        # Save full beam at end simulation 
        if bunch.slice_info['i_turn'] == self.N_turns:
            # PyPARIS wants N_turns to be a multiple of N_parellel_rings
            assert(self.ring_of_CPUs.I_am_the_master) 
            dirname = 'beam_status_part%02d'%(self.SimSt.present_simulation_part)
            import PyPARIS.gen_multibunch_beam as gmb
            gmb.save_bunch_to_folder(bunch, dirname)
            if bunch.slice_info['i_bunch'] == bunch.slice_info['N_bunches_tot_beam'] - 1:
                if not self.SimSt.first_run:
                    os.system('rm -r beam_status_part%02d' % (self.SimSt.present_simulation_part - 1))
                self.SimSt.after_simulation()

    def slice_bunch_at_start_ring(self, bunch):
        list_slices = sl.slice_a_bunch(bunch, self.z_cut_slicing, self.n_slices_per_bunch)
        return list_slices

    def treat_piece(self, piece):
        for ele in self.mypart: 
            ele.track(piece)
        
    def merge_slices_at_end_ring(self, list_slices):
        bunch = sl.merge_slices_into_bunch(list_slices)
        return bunch

    def perform_bunch_operations_at_end_ring(self, bunch):
        #finalize present turn (with non parallel part, e.g. synchrotron motion)
        if bunch.macroparticlenumber>0:
            for ele in self.non_parallel_part:
                ele.track(bunch)

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
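
# Aside (a sketch, not part of the example above): piece_to_buffer and
# buffer_to_piece serialize a particle set to a flat buffer for MPI
# transport and back. Assuming, as these examples do, that `ch` is
# PyPARIS's communication helpers module, the round trip looks like:
#
#   buf = ch.beam_2_buffer(piece)     # particle set -> flat numeric buffer
#   piece = ch.buffer_2_beam(buf)     # buffer -> equivalent particle set
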
class Simulation(object):
	def __init__(self):
		self.N_turns = N_turns

	def init_all(self):

		
		self.n_slices = n_slices
		self.n_segments = n_segments

		# define the machine
		from LHC_custom import LHC
		self.machine = LHC(n_segments = n_segments, machine_configuration = machine_configuration)
		
		# define MP size
		nel_mp_ref_0 = init_unif_edens*4*x_aper*y_aper/N_MP_ele_init
		
		# prepare e-cloud
		import PyECLOUD.PyEC4PyHT as PyEC4PyHT
		ecloud = PyEC4PyHT.Ecloud(slice_by_slice_mode=True,
						L_ecloud=self.machine.circumference/n_segments, slicer=None , 
						Dt_ref=Dt_ref, pyecl_input_folder=pyecl_input_folder,
						chamb_type = chamb_type,
						x_aper=x_aper, y_aper=y_aper,
						filename_chm=filename_chm, Dh_sc=Dh_sc,
						init_unif_edens_flag=init_unif_edens_flag,
						init_unif_edens=init_unif_edens, 
						N_mp_max=N_mp_max,
						nel_mp_ref_0=nel_mp_ref_0,
						B_multip=B_multip_per_eV*self.machine.p0/e*c)

		# setup transverse losses (to "protect" the ecloud)
		import PyHEADTAIL.aperture.aperture as aperture
		apt_xy = aperture.EllipticalApertureXY(x_aper=x_aper, y_aper=y_aper)
		self.machine.one_turn_map.append(apt_xy)
		
		n_non_parallelizable = 2 #rf and aperture
		
		# We assume that all the objects that cannot be parallelized slice by slice sit at the end of the ring
		i_end_parallel = len(self.machine.one_turn_map)-n_non_parallelizable

		# split the machine
		sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
		myid = self.ring_of_CPUs.myid
		i_start_part, i_end_part = sharing.my_part(myid)
		self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
		if self.ring_of_CPUs.I_am_a_worker:
			print('I am id=%d/%d (worker) and my part is %d long'%(myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
		elif self.ring_of_CPUs.I_am_the_master:
			self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
			print('I am id=%d/%d (master) and my part is %d long'%(myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
		
		#install eclouds in my part
		my_new_part = []
		self.my_list_eclouds = []
		for ele in self.mypart:
			my_new_part.append(ele)
			if ele in self.machine.transverse_map:
				ecloud_new = ecloud.generate_twin_ecloud_with_shared_space_charge()
				my_new_part.append(ecloud_new)
				self.my_list_eclouds.append(ecloud_new)
		self.mypart = my_new_part

	def init_master(self):
		
		# generate a bunch 
		bunch = self.machine.generate_6D_Gaussian_bunch_matched(
						n_macroparticles=n_macroparticles, intensity=intensity, 
						epsn_x=epsn_x, epsn_y=epsn_y, sigma_z=sigma_z)
		print('Bunch initialized.')

		# initial slicing
		from PyHEADTAIL.particles.slicing import UniformBinSlicer
		self.slicer = UniformBinSlicer(n_slices = n_slices, n_sigma_z = n_sigma_z)

		# compute initial displacements
		inj_opt = self.machine.transverse_map.get_injection_optics()
		sigma_x = np.sqrt(inj_opt['beta_x']*epsn_x/self.machine.betagamma)
		sigma_y = np.sqrt(inj_opt['beta_y']*epsn_y/self.machine.betagamma)
		x_kick = x_kick_in_sigmas*sigma_x
		y_kick = y_kick_in_sigmas*sigma_y
		
		# apply initial displacement
		bunch.x += x_kick
		bunch.y += y_kick
		
		# define a bunch monitor 
		from PyHEADTAIL.monitors.monitors import BunchMonitor
		self.bunch_monitor = BunchMonitor('bunch_evolution', N_turns, {'Comment':'PyHDTL simulation'}, 
							write_buffer_every = 8)

		
		#slice for the first turn
		slice_obj_list = bunch.extract_slices(self.slicer)

		pieces_to_be_treated = slice_obj_list
		
		print('N_turns', self.N_turns)

		return pieces_to_be_treated
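
	# Flow summary: the master slices the bunch and distributes the slices;
	# each node tracks them through its own segment of the one-turn map
	# (treat_piece); the master then re-merges the slices, applies the
	# non-parallel elements, dumps the bunch monitor and re-slices for the
	# next turn (finalize_turn_on_master).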

	def init_worker(self):
		pass

	def treat_piece(self, piece):
		for ele in self.mypart:
			ele.track(piece)

	def finalize_turn_on_master(self, pieces_treated):
		
		# re-merge bunch
		bunch = sum(pieces_treated)

		#finalize present turn (with non parallel part, e.g. synchrotron motion)
		for ele in self.non_parallel_part:
			ele.track(bunch)
			
		# save results		
		#print '%s Turn %d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
		self.bunch_monitor.dump(bunch)
		
		# prepare next turn (re-slice)
		new_pieces_to_be_treated = bunch.extract_slices(self.slicer)
		orders_to_pass = ['reset_clouds']

		return orders_to_pass, new_pieces_to_be_treated


	def execute_orders_from_master(self, orders_from_master):
		if 'reset_clouds' in orders_from_master:
			for ec in self.my_list_eclouds:
				ec.finalize_and_reinitialize()


		
	def finalize_simulation(self):
		pass
		
	def piece_to_buffer(self, piece):
		buf = ch.beam_2_buffer(piece)
		return buf

	def buffer_to_piece(self, buf):
		piece = ch.buffer_2_beam(buf)
		return piece
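
# Aside (a hypothetical stand-in, not part of the example): the ShareSegments
# helper used above splits the first i_end_parallel elements of the one-turn
# map as evenly as possible across the nodes. A sketch with the same
# interface illustrates the idea:
class EvenShare(object):
    def __init__(self, n_elements, n_nodes):
        self.n_elements = n_elements
        self.n_nodes = n_nodes

    def my_part(self, myid):
        # the first (n_elements % n_nodes) nodes take one extra element
        base, rem = divmod(self.n_elements, self.n_nodes)
        i_start = myid * base + min(myid, rem)
        i_end = i_start + base + (1 if myid < rem else 0)
        return i_start, i_end

# e.g. EvenShare(10, 4).my_part(0) == (0, 3) and EvenShare(10, 4).my_part(3) == (8, 10)
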
Example n. 25
0
class TestMonitor(unittest.TestCase):
    ''' Test the BunchMonitor/SliceMonitor'''
    def setUp(self):
        self.n_turns = 10
        self.bunch_fn = 'bunchm'
        self.s_fn = 'sm'
        self.nslices = 5
        self.bunch_monitor = BunchMonitor(filename=self.bunch_fn,
                                          n_steps=self.n_turns,
                                          write_buffer_every=2,
                                          buffer_size=7,
                                          stats_to_store=['mean_x', 'macrop'])

    def tearDown(self):
        try:
            os.remove(self.bunch_fn + '.h5')
            os.remove(self.s_fn + '.h5')
        except OSError:
            pass

    def test_bunchmonitor(self):
        '''
        Test whether the data stored in the h5 file correspond to the
        correct values. Use a mock bunch class which creates an easy
        to check pattern when accessing 'mean_x', 'macrop'
        '''
        mock = self.generate_mock_bunch()
        for i in range(self.n_turns):
            self.bunch_monitor.dump(mock)
        bunchdata = hp.File(self.bunch_fn + '.h5', 'r')
        b = bunchdata['Bunch']
        self.assertTrue(
            np.allclose(b['mean_x'], np.arange(start=1,
                                               stop=self.n_turns + 0.5)))
        self.assertTrue(np.allclose(b['macrop'], 99 * np.ones(self.n_turns)))

    def test_slicemonitor(self):
        '''
        Test whether the slicemonitor works as expected, use the mock slicer
        '''
        nslices = 3
        mock_slicer = self.generate_mock_slicer(nslices)
        mock_bunch = self.generate_mock_bunch()
        slice_monitor = SliceMonitor(filename=self.s_fn,
                                     n_steps=self.n_turns,
                                     slicer=mock_slicer,
                                     buffer_size=11,
                                     write_buffer_every=9,
                                     slice_stats_to_store=['propertyA'],
                                     bunch_stats_to_store=['mean_x', 'macrop'])
        for i in range(self.n_turns):
            slice_monitor.dump(mock_bunch)
        s = hp.File(self.s_fn + '.h5', 'r')
        sd = s['Slices']
        sb = s['Bunch']
        self.assertTrue(
            np.allclose(sb['mean_x'],
                        np.arange(start=1, stop=self.n_turns + 0.5)))
        self.assertTrue(np.allclose(sb['macrop'], 99 * np.ones(self.n_turns)))
        for k in range(nslices):
            for j in range(self.n_turns):
                self.assertTrue(
                    np.allclose(sd['propertyA'][k, j], k + (j + 1) * 1000),
                    'Slices part of SliceMonitor wrong')

    def test_cellmonitor(self):
        '''
        Test whether the cellmonitor works as expected.
        '''
        bunch = self.generate_real_bunch()
        cell_monitor = CellMonitor(
            filename=self.s_fn,
            n_steps=self.n_turns,
            n_azimuthal_slices=4,
            n_radial_slices=3,
            radial_cut=bunch.sigma_dp() * 3,
            beta_z=np.abs(0.003 * bunch.circumference / (2 * np.pi * 0.004)),
            write_buffer_every=9,
        )
        for i in range(self.n_turns):
            cell_monitor.dump(bunch)
        s = hp.File(self.s_fn + '.h5', 'r')
        sc = s['Cells']

        # to be extended

    def generate_mock_bunch(self):
        '''
        Create a mock class which defines certain attributes which can be
        stored via the BunchMonitor
        '''
        class Mock():
            def __init__(self):
                self.counter = np.zeros(
                    3, dtype=np.int32)  #1 for each of mean/std/...
                self.macrop = 99

            def mean_x(self):
                self.counter[0] += 1
                return self.counter[0]

            def mean_y(self):
                self.counter[1] += 1
                return self.counter[1]

            def get_slices(self, slicer, **kwargs):
                return slicer

        return Mock()

    def generate_real_bunch(self):

        #beam parameters
        intensity = 1.234e9
        circumference = 111.
        gamma = 20.1

        #simulation parameters
        macroparticlenumber = 2048
        particlenumber_per_mp = intensity / macroparticlenumber

        x = np.random.uniform(-1, 1, macroparticlenumber)
        y = np.random.uniform(-1, 1, macroparticlenumber)
        z = np.random.uniform(-1, 1, macroparticlenumber)
        xp = np.random.uniform(-0.5, 0.5, macroparticlenumber)
        yp = np.random.uniform(-0.5, 0.5, macroparticlenumber)
        dp = np.random.uniform(-0.5, 0.5, macroparticlenumber)
        coords_n_momenta_dict = {
            'x': x,
            'y': y,
            'z': z,
            'xp': xp,
            'yp': yp,
            'dp': dp
        }
        return Particles(macroparticlenumber, particlenumber_per_mp, e, m_p,
                         circumference, gamma, coords_n_momenta_dict)

    def generate_mock_slicer(self, nslices):
        ''' Create a mock slicer to test behaviour'''
        class Mock():
            def __init__(self, nslices):
                self.n_slices = nslices
                self.counter = 0

            @property
            def propertyA(self):
                ''' Return an array of length nslices, np.arange(nslices)
                Add the number of calls * 1000 to the array
                This makes it easy to compare the results
                '''
                self.counter += 1
                prop = np.arange(0, self.n_slices, 1, dtype=np.float64)
                prop += self.counter * 1000
                return prop

        return Mock(nslices)
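
# A readback sketch (not from the original tests): inspecting what the
# BunchMonitor above wrote. It assumes `hp` in the tests is h5py and that a
# monitor with stats_to_store=['mean_x', 'macrop'] has written 'bunchm.h5'
# in the working directory.
import h5py

with h5py.File('bunchm.h5', 'r') as fh:
    bunch_group = fh['Bunch']          # one dataset per stored statistic
    mean_x = bunch_group['mean_x'][:]  # one entry per dump() call
    macrop = bunch_group['macrop'][:]
print(mean_x, macrop)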
Example n. 26
0
# setup transverse losses (to "protect" the ecloud)
import PyHEADTAIL.aperture.aperture as aperture
apt_xy = aperture.EllipticalApertureXY(x_aper=ecloud.impact_man.chamb.x_aper,
                                       y_aper=ecloud.impact_man.chamb.y_aper)
machine.one_turn_map.append(apt_xy)

# generate a bunch
bunch = machine.generate_6D_Gaussian_bunch_matched(n_macroparticles=300000,
                                                   intensity=intensity,
                                                   epsn_x=epsn_x,
                                                   epsn_y=epsn_y,
                                                   sigma_z=1.35e-9 / 4 * c)

# apply initial displacement
bunch.x += x_kick
bunch.y += y_kick

# define a bunch monitor
from PyHEADTAIL.monitors.monitors import BunchMonitor
bunch_monitor = BunchMonitor('bunch_evolution',  # the monitor appends '.h5'
                             N_turns, {'Comment': 'PyHDTL simulation'},
                             write_buffer_every=8)

# simulate
import time
for i_turn in range(N_turns):
    print('%s Turn %d' % (time.strftime("%d/%m/%Y %H:%M:%S",
                                         time.localtime()), i_turn))
    machine.track(bunch, verbose=False)
    bunch_monitor.dump(bunch)
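
# A post-processing sketch (not in the original script): plot the stored
# horizontal centroid turn by turn, assuming the monitor above wrote
# 'bunch_evolution.h5' (the monitor appends '.h5' to the filename it is given).
import h5py
import matplotlib.pyplot as plt

with h5py.File('bunch_evolution.h5', 'r') as fh:
    mean_x = fh['Bunch']['mean_x'][:]

plt.plot(mean_x)
plt.xlabel('Turn')
plt.ylabel('mean_x [m]')
plt.show()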
Example n. 27
0
class TestMonitor(unittest.TestCase):
    ''' Test the BunchMonitor/SliceMonitor'''
    def setUp(self):
        self.n_turns = 10
        self.bunch_fn = 'bunchm'
        self.s_fn = 'sm'
        self.nslices = 5
        self.bunch_monitor = BunchMonitor(filename=self.bunch_fn,
                                          n_steps=self.n_turns,
                                          write_buffer_every=2,
                                          buffer_size=7,
                                          stats_to_store=['mean_x', 'macrop'])

    def tearDown(self):
        try:
            os.remove(self.bunch_fn + '.h5')
            os.remove(self.s_fn + '.h5')
        except OSError:
            pass

    def test_bunchmonitor(self):
        '''
        Test whether the data stored in the h5 file correspond to the
        correct values. Use a mock bunch class which creates an easy
        to check pattern when accessing 'mean_x', 'macrop'
        '''
        mock = self.generate_mock_bunch()
        for i in range(self.n_turns):
            self.bunch_monitor.dump(mock)
        bunchdata = hp.File(self.bunch_fn + '.h5', 'r')
        b = bunchdata['Bunch']
        self.assertTrue(
            np.allclose(b['mean_x'], np.arange(start=1,
                                               stop=self.n_turns + 0.5)))
        self.assertTrue(np.allclose(b['macrop'], 99 * np.ones(self.n_turns)))

    def test_slicemonitor(self):
        '''
        Test whether the slicemonitor works as expected, use the mock slicer
        '''
        nslices = 3
        mock_slicer = self.generate_mock_slicer(nslices)
        mock_bunch = self.generate_mock_bunch()
        slice_monitor = SliceMonitor(filename=self.s_fn,
                                     n_steps=self.n_turns,
                                     slicer=mock_slicer,
                                     buffer_size=11,
                                     write_buffer_every=9,
                                     slice_stats_to_store=['propertyA'],
                                     bunch_stats_to_store=['mean_x', 'macrop'])
        for i in range(self.n_turns):
            slice_monitor.dump(mock_bunch)
        s = hp.File(self.s_fn + '.h5', 'r')
        sd = s['Slices']
        sb = s['Bunch']
        self.assertTrue(
            np.allclose(sb['mean_x'],
                        np.arange(start=1, stop=self.n_turns + 0.5)))
        self.assertTrue(np.allclose(sb['macrop'], 99 * np.ones(self.n_turns)))
        for k in range(nslices):
            for j in range(self.n_turns):
                self.assertTrue(
                    np.allclose(sd['propertyA'][k, j], k + (j + 1) * 1000),
                    'Slices part of SliceMonitor wrong')

    def generate_mock_bunch(self):
        '''
        Create a mock class which defines certain attributes which can be
        stored via the BunchMonitor
        '''
        class Mock():
            def __init__(self):
                self.counter = np.zeros(
                    3, dtype=np.int32)  #1 for each of mean/std/...
                self.macrop = 99

            def mean_x(self):
                self.counter[0] += 1
                return self.counter[0]

            def mean_y(self):
                self.counter[1] += 1
                return self.counter[1]

            def get_slices(self, slicer, **kwargs):
                return slicer

        return Mock()

    def generate_mock_slicer(self, nslices):
        ''' Create a mock slicer to test behaviour'''
        class Mock():
            def __init__(self, nslices):
                self.n_slices = nslices
                self.counter = 0

            @property
            def propertyA(self):
                ''' Return an array of length nslices, np.arange(nslices)
                Add the number of calls * 1000 to the array
                This makes it easy to compare the results
                '''
                self.counter += 1
                prop = np.arange(0, self.n_slices, 1, dtype=np.float64)
                prop += self.counter * 1000
                return prop

        return Mock(nslices)
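
# A conventional entry point (an addition, assuming the file's top-level
# imports include unittest as used above) so the tests can be run directly:
if __name__ == '__main__':
    unittest.main()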