Example #1
 def test_slicemonitor(self):
     '''
     Test whether the slicemonitor works as expected, using the mock slicer
     '''
     nslices = 3
     mock_slicer = self.generate_mock_slicer(nslices)
     mock_bunch = self.generate_mock_bunch()
     slice_monitor = SliceMonitor(filename=self.s_fn, n_steps=self.n_turns,
             slicer=mock_slicer, buffer_size=11, write_buffer_every=9,
             slice_stats_to_store=['propertyA'],
             bunch_stats_to_store=['mean_x', 'macrop'])
     for i in range(self.n_turns):
         slice_monitor.dump(mock_bunch)
     s = hp.File(self.s_fn + '.h5', 'r')
     sd = s['Slices']
     sb = s['Bunch']
     self.assertTrue(np.allclose(sb['mean_x'],
         np.arange(start=1, stop=self.n_turns+0.5)))
     self.assertTrue(np.allclose(sb['macrop'], 99*np.ones(self.n_turns)))
     for k in range(nslices):
         for j in range(self.n_turns):
             self.assertTrue(np.allclose(sd['propertyA'][k,j],
                 k + (j+1)*1000), 'Slices part of SliceMonitor wrong')
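     # Note: the mock helpers are assumed (not shown here) to produce data
     # matching the assertions above, i.e. a bunch whose mean_x() grows by 1
     # per dump with a fixed macroparticle count of 99, and a slicer whose
     # per-slice 'propertyA' encodes slice_index + 1000 * turn.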
class Simulation(object):
    def __init__(self):
        self.N_turns = pp.N_turns

    def init_all(self):

        self.n_slices = pp.n_slices

        # read the optics if needed
        if pp.optics_pickle_file is not None:
            with open(pp.optics_pickle_file, 'rb') as fid:
                optics = pickle.load(fid)
                self.n_kick_smooth = np.sum(
                    ['_kick_smooth_' in nn for nn in optics['name']])
        else:
            optics = None
            self.n_kick_smooth = pp.n_segments

        # define the machine
        from LHC_custom import LHC
        self.machine = LHC(n_segments=pp.n_segments,
                           machine_configuration=pp.machine_configuration,
                           beta_x=pp.beta_x,
                           beta_y=pp.beta_y,
                           accQ_x=pp.Q_x,
                           accQ_y=pp.Q_y,
                           Qp_x=pp.Qp_x,
                           Qp_y=pp.Qp_y,
                           octupole_knob=pp.octupole_knob,
                           optics_dict=optics)
        self.n_segments = self.machine.transverse_map.n_segments

        # compute sigma
        inj_opt = self.machine.transverse_map.get_injection_optics()
        sigma_x_inj = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                              self.machine.betagamma)
        sigma_y_inj = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                              self.machine.betagamma)

        if pp.optics_pickle_file is None:
            sigma_x_smooth = sigma_x_inj
            sigma_y_smooth = sigma_y_inj
        else:
            beta_x_smooth = None
            beta_y_smooth = None
            for ele in self.machine.one_turn_map:
                if ele in self.machine.transverse_map:
                    if '_kick_smooth_' in ele.name1:
                        if beta_x_smooth is None:
                            beta_x_smooth = ele.beta_x1
                            beta_y_smooth = ele.beta_y1
                        else:
                            if beta_x_smooth != ele.beta_x1 or beta_y_smooth != ele.beta_y1:
                                raise ValueError(
                                    'Smooth kicks must have all the same beta')

            if beta_x_smooth is None:
                sigma_x_smooth = None
                sigma_y_smooth = None
            else:
                sigma_x_smooth = np.sqrt(beta_x_smooth * pp.epsn_x /
                                         self.machine.betagamma)
                sigma_y_smooth = np.sqrt(beta_y_smooth * pp.epsn_y /
                                         self.machine.betagamma)

        # define MP size
        nel_mp_ref_0 = pp.init_unif_edens_dip * 4 * pp.x_aper * pp.y_aper / pp.N_MP_ele_init_dip
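        # (Reading of the formula above: the initial uniform electron density
        #  times the chamber cross-section, approximated by the bounding
        #  rectangle 4 * x_aper * y_aper, divided by the number of
        #  macroparticles gives the number of electrons represented by each
        #  macroparticle.)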

        # prepare e-cloud
        import PyECLOUD.PyEC4PyHT as PyEC4PyHT

        if pp.custom_target_grid_arcs is not None:
            target_grid_arcs = pp.custom_target_grid_arcs
        else:
            target_grid_arcs = {
                'x_min_target':
                -pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'x_max_target':
                pp.target_size_internal_grid_sigma * sigma_x_smooth,
                'y_min_target':
                -pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'y_max_target':
                pp.target_size_internal_grid_sigma * sigma_y_smooth,
                'Dh_target': pp.target_Dh_internal_grid_sigma * sigma_x_smooth
            }
        self.target_grid_arcs = target_grid_arcs

        if pp.enable_arc_dip:
            ecloud_dip = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_dip,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                init_unif_edens_flag=pp.init_unif_edens_flag_dip,
                init_unif_edens=pp.init_unif_edens_dip,
                N_mp_max=pp.N_mp_max_dip,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_dip,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if pp.enable_arc_quad:
            ecloud_quad = PyEC4PyHT.Ecloud(
                slice_by_slice_mode=True,
                L_ecloud=self.machine.circumference / self.n_kick_smooth *
                pp.fraction_device_quad,
                slicer=None,
                Dt_ref=pp.Dt_ref,
                pyecl_input_folder=pp.pyecl_input_folder,
                chamb_type=pp.chamb_type,
                x_aper=pp.x_aper,
                y_aper=pp.y_aper,
                filename_chm=pp.filename_chm,
                PyPICmode=pp.PyPICmode,
                Dh_sc=pp.Dh_sc_ext,
                N_min_Dh_main=pp.N_min_Dh_main,
                f_telescope=pp.f_telescope,
                N_nodes_discard=pp.N_nodes_discard,
                target_grid=target_grid_arcs,
                N_mp_max=pp.N_mp_max_quad,
                nel_mp_ref_0=nel_mp_ref_0,
                B_multip=pp.B_multip_quad,
                filename_init_MP_state=pp.filename_init_MP_state_quad,
                enable_kick_x=pp.enable_kick_x,
                enable_kick_y=pp.enable_kick_y)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_dip:
            with open('multigrid_config_dip.txt', 'w') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_dip.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_dip.pkl', 'wb') as fid:
                if hasattr(ecloud_dip.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_dip.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        if self.ring_of_CPUs.I_am_the_master and pp.enable_arc_quad:
            with open('multigrid_config_quad.txt', 'w') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    fid.write(repr(ecloud_quad.spacech_ele.PyPICobj.grids))
                else:
                    fid.write("Single grid.")

            with open('multigrid_config_quad.pkl', 'wb') as fid:
                if hasattr(ecloud_quad.spacech_ele.PyPICobj, 'grids'):
                    pickle.dump(ecloud_quad.spacech_ele.PyPICobj.grids, fid)
                else:
                    pickle.dump('Single grid.', fid)

        # setup transverse losses (to "protect" the ecloud)
        import PyHEADTAIL.aperture.aperture as aperture
        apt_xy = aperture.EllipticalApertureXY(
            x_aper=pp.target_size_internal_grid_sigma * sigma_x_inj,
            y_aper=pp.target_size_internal_grid_sigma * sigma_y_inj)
        self.machine.one_turn_map.append(apt_xy)

        if pp.enable_transverse_damper:
            # setup transverse damper
            from PyHEADTAIL.feedback.transverse_damper import TransverseDamper
            damper = TransverseDamper(dampingrate_x=pp.dampingrate_x,
                                      dampingrate_y=pp.dampingrate_y)
            self.machine.one_turn_map.append(damper)

        # We assume that all objects that cannot be slice-parallelized
        # are at the end of the ring
        i_end_parallel = len(
            self.machine.one_turn_map) - pp.n_non_parallelizable

        # split the machine
        sharing = shs.ShareSegments(i_end_parallel, self.ring_of_CPUs.N_nodes)
        myid = self.ring_of_CPUs.myid
        i_start_part, i_end_part = sharing.my_part(myid)
        self.mypart = self.machine.one_turn_map[i_start_part:i_end_part]
        if self.ring_of_CPUs.I_am_a_worker:
            print('I am id=%d/%d (worker) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))
        elif self.ring_of_CPUs.I_am_the_master:
            self.non_parallel_part = self.machine.one_turn_map[i_end_parallel:]
            print('I am id=%d/%d (master) and my part is %d long' % (
                myid, self.ring_of_CPUs.N_nodes, len(self.mypart)))

        #install eclouds in my part
        my_new_part = []
        self.my_list_eclouds = []
        for ele in self.mypart:
            my_new_part.append(ele)
            if ele in self.machine.transverse_map:
                if pp.optics_pickle_file is None or '_kick_smooth_' in ele.name1:
                    if pp.enable_arc_dip:
                        ecloud_dip_new = ecloud_dip.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_dip_new)
                        self.my_list_eclouds.append(ecloud_dip_new)
                    if pp.enable_arc_quad:
                        ecloud_quad_new = ecloud_quad.generate_twin_ecloud_with_shared_space_charge()
                        my_new_part.append(ecloud_quad_new)
                        self.my_list_eclouds.append(ecloud_quad_new)
                elif '_kick_element_' in ele.name1 and pp.enable_eclouds_at_kick_elements:

                    i_in_optics = list(optics['name']).index(ele.name1)
                    kick_name = optics['name'][i_in_optics]
                    element_name = kick_name.split('_kick_element_')[-1]
                    L_curr = optics['L_interaction'][i_in_optics]

                    buildup_folder = pp.path_buildup_simulations_kick_elements.replace(
                        '!!!NAME!!!', element_name)
                    chamber_fname = '%s_chamber.mat' % (element_name)

                    B_multip_curr = [0., optics['gradB'][i_in_optics]]

                    x_beam_offset = optics['x'][i_in_optics] * pp.orbit_factor
                    y_beam_offset = optics['y'][i_in_optics] * pp.orbit_factor

                    sigma_x_local = np.sqrt(optics['beta_x'][i_in_optics] *
                                            pp.epsn_x / self.machine.betagamma)
                    sigma_y_local = np.sqrt(optics['beta_y'][i_in_optics] *
                                            pp.epsn_y / self.machine.betagamma)

                    ecloud_ele = PyEC4PyHT.Ecloud(
                        slice_by_slice_mode=True,
                        L_ecloud=L_curr,
                        slicer=None,
                        Dt_ref=pp.Dt_ref,
                        pyecl_input_folder=pp.pyecl_input_folder,
                        chamb_type='polyg',
                        x_aper=None,
                        y_aper=None,
                        filename_chm=buildup_folder + '/' + chamber_fname,
                        PyPICmode=pp.PyPICmode,
                        Dh_sc=pp.Dh_sc_ext,
                        N_min_Dh_main=pp.N_min_Dh_main,
                        f_telescope=pp.f_telescope,
                        N_nodes_discard=pp.N_nodes_discard,
                        target_grid={
                            'x_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'x_max_target':
                            pp.target_size_internal_grid_sigma * sigma_x_local
                            + x_beam_offset,
                            'y_min_target':
                            -pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'y_max_target':
                            pp.target_size_internal_grid_sigma * sigma_y_local
                            + y_beam_offset,
                            'Dh_target':
                            pp.target_Dh_internal_grid_sigma * sigma_y_local
                        },
                        N_mp_max=pp.N_mp_max_quad,
                        nel_mp_ref_0=nel_mp_ref_0,
                        B_multip=B_multip_curr,
                        filename_init_MP_state=buildup_folder + '/' +
                        pp.name_MP_state_file_kick_elements,
                        x_beam_offset=x_beam_offset,
                        y_beam_offset=y_beam_offset,
                        enable_kick_x=pp.enable_kick_x,
                        enable_kick_y=pp.enable_kick_y)

                    my_new_part.append(ecloud_ele)
                    self.my_list_eclouds.append(ecloud_ele)

        self.mypart = my_new_part

        if pp.footprint_mode:
            print('Proc. %d computing maps' % myid)
            # generate a bunch
            bunch_for_map = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_map,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)

            # Slice the bunch
            slicer_for_map = UniformBinSlicer(n_slices=pp.n_slices,
                                              z_cuts=(-pp.z_cut, pp.z_cut))
            slices_list_for_map = bunch_for_map.extract_slices(slicer_for_map)

            #Track the previous part of the machine
            for ele in self.machine.one_turn_map[:i_start_part]:
                for ss in slices_list_for_map:
                    ele.track(ss)

            # Measure optics, track and replace clouds with maps
            list_ele_type = []
            list_meas_beta_x = []
            list_meas_alpha_x = []
            list_meas_beta_y = []
            list_meas_alpha_y = []
            for ele in self.mypart:
                list_ele_type.append(str(type(ele)))
                # Measure optics
                bbb = sum(slices_list_for_map)
                list_meas_beta_x.append(bbb.beta_Twiss_x())
                list_meas_alpha_x.append(bbb.alpha_Twiss_x())
                list_meas_beta_y.append(bbb.beta_Twiss_y())
                list_meas_alpha_y.append(bbb.alpha_Twiss_y())

                if ele in self.my_list_eclouds:
                    ele.track_once_and_replace_with_recorded_field_map(
                        slices_list_for_map)
                else:
                    for ss in slices_list_for_map:
                        ele.track(ss)
            print('Proc. %d done with maps' % myid)

            with open('measured_optics_%d.pkl' % myid, 'wb') as fid:
                pickle.dump(
                    {
                        'ele_type': list_ele_type,
                        'beta_x': list_meas_beta_x,
                        'alpha_x': list_meas_alpha_x,
                        'beta_y': list_meas_beta_y,
                        'alpha_y': list_meas_alpha_y,
                    }, fid)

            #remove RF
            if self.ring_of_CPUs.I_am_the_master:
                self.non_parallel_part.remove(self.machine.longitudinal_map)

    def init_master(self):

        # Manage multi-job operation
        if pp.footprint_mode:
            if pp.N_turns != pp.N_turns_target:
                raise ValueError(
                    'In footprint mode you need to set N_turns_target=N_turns_per_run!'
                )

        import Save_Load_Status as SLS
        SimSt = SLS.SimulationStatus(N_turns_per_run=pp.N_turns,
                                     check_for_resubmit=True,
                                     N_turns_target=pp.N_turns_target)
        SimSt.before_simulation()
        self.SimSt = SimSt

        # generate a bunch
        if pp.footprint_mode:
            self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles_for_footprint_track,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)
        elif SimSt.first_run:
            self.bunch = self.machine.generate_6D_Gaussian_bunch_matched(
                n_macroparticles=pp.n_macroparticles,
                intensity=pp.intensity,
                epsn_x=pp.epsn_x,
                epsn_y=pp.epsn_y,
                sigma_z=pp.sigma_z)

            # compute initial displacements
            inj_opt = self.machine.transverse_map.get_injection_optics()
            sigma_x = np.sqrt(inj_opt['beta_x'] * pp.epsn_x /
                              self.machine.betagamma)
            sigma_y = np.sqrt(inj_opt['beta_y'] * pp.epsn_y /
                              self.machine.betagamma)
            x_kick = pp.x_kick_in_sigmas * sigma_x
            y_kick = pp.y_kick_in_sigmas * sigma_y
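            # (The kick amplitudes are expressed in units of the matched RMS
            #  beam size, sigma = sqrt(beta * epsn / (beta_rel * gamma_rel)),
            #  so x_kick_in_sigmas displaces the bunch by that many standard
            #  deviations.)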

            # apply initial displacement
            if not pp.footprint_mode:
                self.bunch.x += x_kick
                self.bunch.y += y_kick

            print('Bunch initialized.')
        else:
            print('Loading bunch from file...')
            with h5py.File(
                    'bunch_status_part%02d.h5' %
                    (SimSt.present_simulation_part - 1), 'r') as fid:
                self.bunch = self.buffer_to_piece(
                    np.array(fid['bunch']).copy())
            print('Bunch loaded from file.')

        # initial slicing
        self.slicer = UniformBinSlicer(n_slices=pp.n_slices,
                                       z_cuts=(-pp.z_cut, pp.z_cut))

        # define a bunch monitor
        from PyHEADTAIL.monitors.monitors import BunchMonitor
        self.bunch_monitor = BunchMonitor(
            'bunch_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        # define a slice monitor
        from PyHEADTAIL.monitors.monitors import SliceMonitor
        self.slice_monitor = SliceMonitor(
            'slice_evolution_%02d' % self.SimSt.present_simulation_part,
            pp.N_turns,
            self.slicer, {'Comment': 'PyHDTL simulation'},
            write_buffer_every=3)

        #slice for the first turn
        slice_obj_list = self.bunch.extract_slices(self.slicer)

        pieces_to_be_treated = slice_obj_list

        print('N_turns', self.N_turns)

        if pp.footprint_mode:
            self.recorded_particles = ParticleTrajectories(
                pp.n_macroparticles_for_footprint_track, self.N_turns)

        return pieces_to_be_treated

    def init_worker(self):
        pass

    def treat_piece(self, piece):
        for ele in self.mypart:
            ele.track(piece)

    def finalize_turn_on_master(self, pieces_treated):

        # re-merge bunch
        self.bunch = sum(pieces_treated)

        #finalize present turn (with non parallel part, e.g. synchrotron motion)
        for ele in self.non_parallel_part:
            ele.track(self.bunch)

        # save results
        #print '%s Turn %d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime()), i_turn)
        self.bunch_monitor.dump(self.bunch)
        self.slice_monitor.dump(self.bunch)

        # prepare next turn (re-slice)
        new_pieces_to_be_treated = self.bunch.extract_slices(self.slicer)

        # order reset of all clouds
        orders_to_pass = ['reset_clouds']

        if pp.footprint_mode:
            self.recorded_particles.dump(self.bunch)

        # check if simulation has to be stopped
        # 1. for beam losses
        if not pp.footprint_mode and self.bunch.macroparticlenumber < pp.sim_stop_frac * pp.n_macroparticles:
            orders_to_pass.append('stop')
            self.SimSt.check_for_resubmit = False
            print('Stop simulation due to beam losses.')

        # 2. for the emittance growth
        if pp.flag_check_emittance_growth:
            epsn_x_max = (pp.epsn_x) * (1 + pp.epsn_x_max_growth_fraction)
            epsn_y_max = (pp.epsn_y) * (1 + pp.epsn_y_max_growth_fraction)
            if not pp.footprint_mode and (self.bunch.epsn_x() > epsn_x_max
                                          or self.bunch.epsn_y() > epsn_y_max):
                orders_to_pass.append('stop')
                self.SimSt.check_for_resubmit = False
                print('Stop simulation due to emittance growth.')

        return orders_to_pass, new_pieces_to_be_treated

    def execute_orders_from_master(self, orders_from_master):
        if 'reset_clouds' in orders_from_master:
            for ec in self.my_list_eclouds:
                ec.finalize_and_reinitialize()

    def finalize_simulation(self):
        if pp.footprint_mode:
            # Tunes

            import NAFFlib
            print('NAFFlib spectral analysis...')
            qx_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            qy_i = np.empty_like(self.recorded_particles.x_i[:, 0])
            for ii in range(len(qx_i)):
                qx_i[ii] = NAFFlib.get_tune(self.recorded_particles.x_i[ii] +
                                            1j *
                                            self.recorded_particles.xp_i[ii])
                qy_i[ii] = NAFFlib.get_tune(self.recorded_particles.y_i[ii] +
                                            1j *
                                            self.recorded_particles.yp_i[ii])
            print('NAFFlib spectral analysis done.')
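            # (NAFFlib.get_tune extracts the dominant spectral line of the
            #  complex turn-by-turn signal x + 1j*xp, i.e. the betatron tune
            #  of each recorded particle.)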

            # Save
            import h5py
            dict_beam_status = {
                'x_init': np.squeeze(self.recorded_particles.x_i[:, 0]),
                'xp_init': np.squeeze(self.recorded_particles.xp_i[:, 0]),
                'y_init': np.squeeze(self.recorded_particles.y_i[:, 0]),
                'yp_init': np.squeeze(self.recorded_particles.yp_i[:, 0]),
                'z_init': np.squeeze(self.recorded_particles.z_i[:, 0]),
                'qx_i': qx_i,
                'qy_i': qy_i,
                'x_centroid': np.mean(self.recorded_particles.x_i, axis=1),
                'y_centroid': np.mean(self.recorded_particles.y_i, axis=1)}

            with h5py.File('footprint.h5', 'w') as fid:
                for kk in dict_beam_status.keys():
                    fid[kk] = dict_beam_status[kk]
        else:
            #save data for multijob operation and launch new job
            import h5py
            with h5py.File(
                    'bunch_status_part%02d.h5' %
                (self.SimSt.present_simulation_part), 'w') as fid:
                fid['bunch'] = self.piece_to_buffer(self.bunch)
            if not self.SimSt.first_run:
                os.system('rm bunch_status_part%02d.h5' %
                          (self.SimSt.present_simulation_part - 1))
            self.SimSt.after_simulation()

    def piece_to_buffer(self, piece):
        buf = ch.beam_2_buffer(piece)
        return buf

    def buffer_to_piece(self, buf):
        piece = ch.buffer_2_beam(buf)
        return piece
Example #4
def run():
    # HELPERS
    def read_all_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5', 'r')
        slicedata = hp.File(sfile + '.h5', 'r')
        particledata = hp.File(pfile + '.h5part', 'r')

        # Bunchdata
        bdata = bunchdata['Bunch']

        n_turns = len(bdata['mean_x'])
        _ = np.empty(n_turns)
        for key in bdata.keys():
            _[:] = bdata[key][:]

        # Slicedata
        sdata = slicedata['Slices']
        sbdata = slicedata['Bunch']

        n_turns = len(sbdata['mean_x'])
        _ = np.empty(n_turns)
        for key in sbdata.keys():
            _[:] = sbdata[key][:]

        n_slices, n_turns = sdata['mean_x'].shape
        _ = np.empty((n_slices, n_turns))
        for key in sdata.keys():
            _[:,:] = sdata[key][:,:]

        # Particledata
        pdata = particledata['Step#0']
        n_particles = len(pdata['x'])
        n_steps = len(particledata.keys())
        _ = np.empty(n_particles)

        for i in range(n_steps):
            step = 'Step#%d' % i
            for key in particledata[step].keys():
                _[:] = particledata[step][key][:]

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def read_n_plot_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5', 'r')
        slicedata = hp.File(sfile + '.h5', 'r')
        particledata = hp.File(pfile + '.h5part', 'r')

        fig = plt.figure(figsize=(16, 16))
        ax1 = fig.add_subplot(311)
        ax2 = fig.add_subplot(312)
        ax3 = fig.add_subplot(313)

        ax1.plot(bunchdata['Bunch']['mean_x'][:])
        ax2.plot(slicedata['Slices']['mean_x'][:,:])
        ax3.plot(particledata['Step#0']['x'][:])
        #ax2.plot(slicedata[])

        plt.show()

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def generate_bunch(n_macroparticles, alpha_x, alpha_y, beta_x, beta_y, alpha_0, Q_s, R):

        intensity = 1.05e11
        sigma_z = 0.059958
        gamma = 3730.26
        eta = alpha_0 - 1. / gamma**2
        gamma_t = 1. / np.sqrt(alpha_0)
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = eta * R / Q_s

        epsn_x = 3.75e-6 # [m rad]
        epsn_y = 3.75e-6 # [m rad]
        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (beta_z * e)  # WITH OR WITHOUT 4 PI?
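        # (One reading of the 4*pi question: with the factor, this is the
        #  full-bunch longitudinal emittance in eV s, i.e. the area
        #  pi * (2*sigma_z) * (2*sigma_dp) * p0 / e of the 2-sigma phase-space
        #  ellipse with sigma_dp = sigma_z / beta_z; without it, the RMS
        #  emittance. This is an interpretation, not settled by the source.)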

        bunch = generators.generate_Gaussian6DTwiss(
            macroparticlenumber=n_macroparticles, intensity=intensity, charge=e,
            gamma=gamma, mass=m_p, circumference=C,
            alpha_x=alpha_x, beta_x=beta_x, epsn_x=epsn_x,
            alpha_y=alpha_y, beta_y=beta_y, epsn_y=epsn_y,
            beta_z=beta_z, epsn_z=epsn_z)
        return bunch


    # In[4]:
    # Basic parameters.
    n_turns = 2
    n_segments = 5
    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2.*np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = 0.0003225


    # ##### Things tested:
    # - Instantiation of the three monitors BunchMonitor, SliceMonitor,
    #   ParticleMonitor.
    # - dump(beam) method for all three.
    # - Reading data from file; plotting example data from the Bunch-, Slice-
    #   and Particle-Monitors.
    # - SliceMonitor: does it handle/request slice_sets correctly?
    # - Buffers are on for the Bunch- and SliceMonitors.
    # Look at one of the files in hdfview to check the units, attributes, ...

    # In[5]:

    # Parameters for transverse map.
    s = np.arange(0, n_segments + 1) * C / n_segments

    alpha_x = alpha_x_inj * np.ones(n_segments)
    beta_x = beta_x_inj * np.ones(n_segments)
    D_x = np.zeros(n_segments)

    alpha_y = alpha_y_inj * np.ones(n_segments)
    beta_y = beta_y_inj * np.ones(n_segments)
    D_y = np.zeros(n_segments)


    # In[6]:

    # Instantiate BunchMonitor, SliceMonitor and ParticleMonitor and dump data to file.
    bunch = generate_bunch(
        n_macroparticles, alpha_x_inj, alpha_y_inj, beta_x_inj, beta_y_inj,
        alpha_0, Q_s, R)

    trans_map = TransverseMap(
        s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y, Q_x, Q_y)

    # Slicer config for SliceMonitor.
    unibin_slicer = UniformBinSlicer(n_slices=10, n_sigma_z=None, z_cuts=None)

    # Monitors
    bunch_filename = 'bunch_mon'
    slice_filename = 'slice_mon'
    particle_filename = 'particle_mon'

    bunch_monitor = BunchMonitor(filename=bunch_filename, n_steps=n_turns, parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    slice_monitor = SliceMonitor(
        filename=slice_filename, n_steps=n_turns, slicer=unibin_slicer, parameters_dict={'Q_x': Q_x},
        write_buffer_every=20)
    particle_monitor = ParticleMonitor(filename=particle_filename, stride=10, parameters_dict={'Q_x': Q_x})

    arrays_dict = {}
    map_ = trans_map
    for i in range(n_turns):
        for m_ in map_:
            m_.track(bunch)
        bunch_monitor.dump(bunch)
        slice_monitor.dump(bunch)

        slice_set_pmon = bunch.get_slices(unibin_slicer)
        arrays_dict.update({'slidx': slice_set_pmon.slice_index_of_particle, 'zz': bunch.z})
        particle_monitor.dump(bunch, arrays_dict)

    read_all_data(bunch_filename, slice_filename, particle_filename)

    os.remove(bunch_filename + '.h5')
    os.remove(slice_filename + '.h5')
    os.remove(particle_filename + '.h5part')
Example #5
def run():
    # HELPERS
    def read_all_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5', 'r')
        slicedata = hp.File(sfile + '.h5', 'r')
        particledata = hp.File(pfile + '.h5part', 'r')

        # Bunchdata
        bdata = bunchdata['Bunch']

        n_turns = len(bdata['mean_x'])
        _ = np.empty(n_turns)
        for key in list(bdata.keys()):
            _[:] = bdata[key][:]

        # Slicedata
        sdata = slicedata['Slices']
        sbdata = slicedata['Bunch']

        n_turns = len(sbdata['mean_x'])
        _ = np.empty(n_turns)
        for key in list(sbdata.keys()):
            _[:] = sbdata[key][:]

        n_slices, n_turns = sdata['mean_x'].shape
        _ = np.empty((n_slices, n_turns))
        for key in list(sdata.keys()):
            _[:, :] = sdata[key][:, :]

        # Particledata
        pdata = particledata['Step#0']
        n_particles = len(pdata['x'])
        n_steps = len(list(particledata.keys()))
        _ = np.empty(n_particles)

        for i in range(n_steps):
            step = 'Step#%d' % i
            for key in list(particledata[step].keys()):
                _[:] = particledata[step][key][:]

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def read_n_plot_data(bfile, sfile, pfile):
        bunchdata = hp.File(bfile + '.h5', 'r')
        slicedata = hp.File(sfile + '.h5', 'r')
        particledata = hp.File(pfile + '.h5part', 'r')

        fig = plt.figure(figsize=(16, 16))
        ax1 = fig.add_subplot(311)
        ax2 = fig.add_subplot(312)
        ax3 = fig.add_subplot(313)

        ax1.plot(bunchdata['Bunch']['mean_x'][:])
        ax2.plot(slicedata['Slices']['mean_x'][:, :])
        ax3.plot(particledata['Step#0']['x'][:])
        #ax2.plot(slicedata[])

        plt.show()

        bunchdata.close()
        slicedata.close()
        particledata.close()

    def generate_bunch(n_macroparticles, alpha_x, alpha_y, beta_x, beta_y,
                       alpha_0, Q_s, R):

        intensity = 1.05e11
        sigma_z = 0.059958
        gamma = 3730.26
        eta = alpha_0 - 1. / gamma**2
        gamma_t = 1. / np.sqrt(alpha_0)
        p0 = np.sqrt(gamma**2 - 1) * m_p * c

        beta_z = eta * R / Q_s
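        # (Interpretation: in the linear approximation beta_z links bunch
        #  length and momentum spread, sigma_z = beta_z * sigma_dp, with eta
        #  the slippage factor and R the machine radius.)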

        epsn_x = 3.75e-6  # [m rad]
        epsn_y = 3.75e-6  # [m rad]
        epsn_z = 4 * np.pi * sigma_z**2 * p0 / (
            beta_z * e
        )  # WITH OR WITHOUT 4 PI?

        bunch = generators.generate_Gaussian6DTwiss(
            macroparticlenumber=n_macroparticles,
            intensity=intensity,
            charge=e,
            gamma=gamma,
            mass=m_p,
            circumference=C,
            alpha_x=alpha_x,
            beta_x=beta_x,
            epsn_x=epsn_x,
            alpha_y=alpha_y,
            beta_y=beta_y,
            epsn_y=epsn_y,
            beta_z=beta_z,
            epsn_z=epsn_z)
        return bunch

    # In[4]:
    # Basic parameters.
    n_turns = 2
    n_segments = 5
    n_macroparticles = 500

    Q_x = 64.28
    Q_y = 59.31
    Q_s = 0.0020443

    C = 26658.883
    R = C / (2. * np.pi)

    alpha_x_inj = 0.
    alpha_y_inj = 0.
    beta_x_inj = 66.0064
    beta_y_inj = 71.5376
    alpha_0 = 0.0003225

    # ##### Things tested:
    # - Instantiation of the three monitors BunchMonitor, SliceMonitor,
    #   ParticleMonitor.
    # - dump(beam) method for all three.
    # - Reading data from file; plotting example data from the Bunch-, Slice-
    #   and Particle-Monitors.
    # - SliceMonitor: does it handle/request slice_sets correctly?
    # - Buffers are on for the Bunch- and SliceMonitors.
    # Look at one of the files in hdfview to check the units, attributes, ...

    # In[5]:

    # Parameters for transverse map.
    s = np.arange(0, n_segments + 1) * C / n_segments

    alpha_x = alpha_x_inj * np.ones(n_segments)
    beta_x = beta_x_inj * np.ones(n_segments)
    D_x = np.zeros(n_segments)

    alpha_y = alpha_y_inj * np.ones(n_segments)
    beta_y = beta_y_inj * np.ones(n_segments)
    D_y = np.zeros(n_segments)

    # In[6]:

    # Instantiate BunchMonitor, SliceMonitor and ParticleMonitor and dump data to file.
    bunch = generate_bunch(n_macroparticles, alpha_x_inj, alpha_y_inj,
                           beta_x_inj, beta_y_inj, alpha_0, Q_s, R)

    trans_map = TransverseMap(s, alpha_x, beta_x, D_x, alpha_y, beta_y, D_y,
                              Q_x, Q_y)

    # Slicer config for SliceMonitor.
    unibin_slicer = UniformBinSlicer(n_slices=10, n_sigma_z=None, z_cuts=None)

    # Monitors
    bunch_filename = 'bunch_mon'
    slice_filename = 'slice_mon'
    particle_filename = 'particle_mon'

    bunch_monitor = BunchMonitor(filename=bunch_filename,
                                 n_steps=n_turns,
                                 parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    slice_monitor = SliceMonitor(filename=slice_filename,
                                 n_steps=n_turns,
                                 slicer=unibin_slicer,
                                 parameters_dict={'Q_x': Q_x},
                                 write_buffer_every=20)
    particle_monitor = ParticleMonitor(filename=particle_filename,
                                       stride=10,
                                       parameters_dict={'Q_x': Q_x})

    arrays_dict = {}
    map_ = trans_map
    for i in range(n_turns):
        for m_ in map_:
            m_.track(bunch)
        bunch_monitor.dump(bunch)
        slice_monitor.dump(bunch)

        slice_set_pmon = bunch.get_slices(unibin_slicer)
        arrays_dict.update({
            'slidx': slice_set_pmon.slice_index_of_particle,
            'zz': bunch.z
        })
        particle_monitor.dump(bunch, arrays_dict)

    read_all_data(bunch_filename, slice_filename, particle_filename)

    os.remove(bunch_filename + '.h5')
    os.remove(slice_filename + '.h5')
    os.remove(particle_filename + '.h5part')
Example #6
def run(job_id, accQ_y):
    it = job_id

    # SIMULATION PARAMETERS
    # =====================
    
    # Simulation parameters 
    n_turns = 10000
    n_macroparticles = 100000 # per bunch 
    
    # MACHINE PARAMETERS
    # ==================
    
    intensity = 2e13 # protons
    Ek = 71e6 # Kinetic energy [eV]
    p0 = np.sqrt((m_p_MeV+Ek)**2 - m_p_MeV**2) * e /c
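    # (Relativistic momentum from the kinetic energy: assuming m_p_MeV holds
    #  the proton rest energy in eV, consistent with Ek, the total energy is
    #  E = m_p_MeV + Ek and p*c = sqrt(E**2 - m_p_MeV**2); the factor e/c
    #  converts from eV/c to SI units.)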
    
    print('Beam kinetic energy: ' + str(Ek*1e-6) + ' MeV')
    print('Beam momentum: ' + str(p0*1e-6*c/e) + ' MeV/c')
    
    
    accQ_x = 4.31 # Horizontal tune
#    accQ_y = 3.80 # Vertical tune is an input argument
    
    Q_s=0.02 # Longitudinal tune
    
    chroma=-1.4 # Chromaticity

    alpha = 5.034**-2  # momentum compaction factor (= 1/gamma_t**2, gamma_t = 5.034)
    
    circumference = 160. # [meters]
    
    # Approximated average beta functions (lumped wake normalizations)
    beta_x = circumference / (2.*np.pi*accQ_x) 
    beta_y = circumference / (2.*np.pi*accQ_y)
    
    # Harmonic number for RF
    h_RF = 2
    h_bunch = h_RF
    V_RF = 2e5
    p_increment = 0.
    dphi_RF = 0.
    longitudinal_mode = 'linear'
    
    optics_mode = 'smooth'
    n_segments = 1    
    s = None
    alpha_x = None
    alpha_y = None
    beta_x = circumference / (2.*np.pi*accQ_x)
    beta_y = circumference / (2.*np.pi*accQ_y)
    D_x = 0
    D_y = 0     
    charge = e
    mass = m_p
    name = None
    app_x = 0
    app_y = 0
    app_xy = 0
    
    # Create the PyHEADTAIL object for the synchrotron
    machine = Synchrotron(optics_mode=optics_mode, circumference=circumference,
            n_segments=n_segments, s=s, name=name,
            alpha_x=alpha_x, beta_x=beta_x, D_x=D_x,
            alpha_y=alpha_y, beta_y=beta_y, D_y=D_y,
            accQ_x=accQ_x, accQ_y=accQ_y, Qp_x=chroma, Qp_y=chroma,
            app_x=app_x, app_y=app_y, app_xy=app_xy,
            alpha_mom_compaction=alpha, longitudinal_mode=longitudinal_mode,
            h_RF=np.atleast_1d(h_RF), V_RF=np.atleast_1d(V_RF),
            dphi_RF=np.atleast_1d(dphi_RF), p0=p0, p_increment=p_increment,
            charge=charge, mass=mass)
    
    print()
    print('machine.beta: ')
    print(machine.beta)
    print()
    
    
    epsn_x = 300e-6
    epsn_y = 300e-6
    sigma_z = 450e-9*c*machine.beta/4.
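    # (Reading of the line above: 450 ns is taken as the full, 4-sigma bunch
    #  length in time; multiplying by beta*c converts it to metres and
    #  dividing by 4 gives the RMS bunch length sigma_z.)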
    
    allbunches = machine.generate_6D_Gaussian_bunch(n_macroparticles, intensity, epsn_x, epsn_y, sigma_z)
    
    # Slicer object, used for the wake fields and the slice monitors
    slicer = UniformBinSlicer(50, z_cuts=(-4.*sigma_z, 4.*sigma_z))

    # WAKE FIELDS
    # ===========
    
    # Length of the wake function in turns
    n_turns_wake = 150
    
    # Parameters for a resonator.
    # The frequency is in units of (mode - Q_frac), where
    #       mode: integer coupled-bunch mode number (1 matches the observations)
    #       Q_frac: fractional tune of the resonance
    
    f_r = (1-0.83)*1./(circumference/(c*machine.beta))
    Q = 15
    R = 1.0e6
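    # (Equivalently f_r = (mode - Q_frac) * f_rev with mode = 1 and
    #  Q_frac = 0.83, where f_rev = beta*c / circumference is the revolution
    #  frequency.)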
    
    # Resonator wake object, which is added to the one-turn map
    wakes = CircularResonator(R, f_r, Q, n_turns_wake=n_turns_wake)
    wake_field = WakeField(slicer, wakes)
    machine.one_turn_map.append(wake_field)


    # CREATE MONITORS
    # ===============
    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': accQ_x,
        'Qy': accQ_y,
        'Qs': Q_s,
        'beta_x': beta_x,
        'beta_y': beta_y,
        # 'beta_z': bucket.beta_z,
        'epsn_x': epsn_x,
        'epsn_y': epsn_y,
        'sigma_z': sigma_z,
    }
    # The bunch monitor stores the average bunch positions for all bunches
    bunchmonitor = BunchMonitor(outputpath + '/bunchmonitor_{:04d}'.format(it), n_turns,
                                simulation_parameters_dict,
                                write_buffer_every=32, buffer_size=32)
    
    # The slice monitor saves slice-by-slice data for each bunch
    slicemonitor = SliceMonitor(
        outputpath + '/slicemonitor_{:01d}_{:04d}'.format(0,it),
        16, slicer,
        simulation_parameters_dict, write_buffer_every=16, buffer_size=16)
        
    # Counter for the number of turns stored by the slice monitor
    s_cnt = 0

    # TRACKING LOOP
    # =============
    monitor_active = False
    print('\n--> Begin tracking...\n')

    for i in range(n_turns):
        t0 = time.perf_counter()

        # Track the beam through the one-turn map
        machine.track(allbunches)
        
        # Stores bunch mean coordinate values
        bunchmonitor.dump(allbunches)

        
    
        # If the total oscillation amplitude of the bunches exceeds the
        # threshold, or the simulation is in its last turns, trigger the
        # slice monitors to record head-tail motion data
        if (allbunches.mean_x() > 1e0 or allbunches.mean_y() > 1e0 or
                i > (n_turns - 64)):
            monitor_active = True
        
        # Save slice monitor data if the monitor is active and fewer than
        # 64 turns have been stored
        if monitor_active and s_cnt < 64:
            slicemonitor.dump(allbunches)
            s_cnt += 1
        elif s_cnt == 64:
            break

        # Every 100 turns, print the current bunch coordinates and emittances
        if (i % 100 == 0):
            print('{:4d} \t {:+3e} \t {:+3e} \t {:+3e} \t {:3e} \t {:3e} \t {:3f} \t {:3f} \t {:3f} \t {:3s}'.format(i, allbunches.mean_x(), allbunches.mean_y(), allbunches.mean_z(), allbunches.epsn_x(), allbunches.epsn_y(), allbunches.epsn_z(), allbunches.sigma_z(), allbunches.sigma_dp(), str(time.perf_counter() - t0)))
Example #7
def run(intensity, chroma=0, i_oct=0):
    '''Arguments:
        - intensity: integer number of charges in beam
        - chroma: first-order chromaticity Q'_{x,y}, identical
          for both transverse planes
        - i_oct: octupole current in A (positive i_oct means
          LOF = i_oct > 0 and LOD = -i_oct < 0)
    '''

    # BEAM AND MACHINE PARAMETERS
    # ============================
    from LHC import LHC
    # energy set above will enter get_nonlinear_params p0
    assert machine_configuration == 'LHC_6.5TeV_collision_2016'
    machine = LHC(n_segments=1,
                  machine_configuration=machine_configuration,
                  **get_nonlinear_params(chroma=chroma, i_oct=i_oct))

    # BEAM
    # ====
    epsn_x = 3.e-6  # normalised horizontal emittance
    epsn_y = 3.e-6  # normalised vertical emittance
    sigma_z = 1.2e-9 * machine.beta * c / 4.  # RMS bunch length in meters

    bunch = machine.generate_6D_Gaussian_bunch_matched(n_macroparticles,
                                                       intensity,
                                                       epsn_x,
                                                       epsn_y,
                                                       sigma_z=sigma_z)

    print("\n--> Bunch length and emittance: {:g} m, {:g} eVs.".format(
        bunch.sigma_z(), bunch.epsn_z()))

    # CREATE BEAM SLICERS
    # ===================
    slicer_for_slicemonitor = UniformBinSlicer(50,
                                               z_cuts=(-3 * sigma_z,
                                                       3 * sigma_z))
    slicer_for_wakefields = UniformBinSlicer(500,
                                             z_cuts=(-3 * sigma_z,
                                                     3 * sigma_z))

    # CREATE WAKES
    # ============
    wake_table1 = WakeTable(
        wakefile,
        [
            'time',
            'dipole_x',
            'dipole_y',
            'quadrupole_x',
            'quadrupole_y',
            # 'noquadrupole_x', 'noquadrupole_y',
            'dipole_xy',
            'dipole_yx',
            # 'nodipole_xy', 'nodipole_yx',
        ])
    wake_field = WakeField(slicer_for_wakefields, wake_table1)

    # CREATE DAMPER
    # =============
    dampingrate = 50
    damper = TransverseDamper(dampingrate, dampingrate)
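    # (In PyHEADTAIL's TransverseDamper the damping rate is given in turns,
    #  so this corresponds to a damping time of 50 turns in each plane.)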

    # CREATE MONITORS
    # ===============
    try:
        bucket = machine.longitudinal_map.get_bucket(bunch)
    except AttributeError:
        bucket = machine.rfbucket

    simulation_parameters_dict = {
        'gamma': machine.gamma,
        'intensity': intensity,
        'Qx': machine.Q_x,
        'Qy': machine.Q_y,
        'Qs': bucket.Q_s,
        'beta_x': bunch.beta_Twiss_x(),
        'beta_y': bunch.beta_Twiss_y(),
        'beta_z': bucket.beta_z,
        'epsn_x': bunch.epsn_x(),
        'epsn_y': bunch.epsn_y(),
        'sigma_z': bunch.sigma_z(),
    }
    bunchmonitor = BunchMonitor(
        outputpath + '/bunchmonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns,
        simulation_parameters_dict,
        write_buffer_every=100)
    slicemonitor = SliceMonitor(
        outputpath + '/slicemonitor_{:04d}_chroma={:g}'.format(it, chroma),
        n_turns_slicemon,
        slicer_for_slicemonitor,
        simulation_parameters_dict,
        write_buffer_every=1,
        buffer_size=n_turns_slicemon)

    # TRACKING LOOP
    # =============
    machine.one_turn_map.append(damper)
    machine.one_turn_map.append(wake_field)

    # for slice statistics monitoring:
    s_cnt = 0
    monitorswitch = False

    print('\n--> Begin tracking...\n')

    # GO!!!
    for i in range(n_turns):

        t0 = time.perf_counter()

        # track the beam around the machine for one turn:
        machine.track(bunch)

        ex, ey, ez = bunch.epsn_x(), bunch.epsn_y(), bunch.epsn_z()
        mx, my, mz = bunch.mean_x(), bunch.mean_y(), bunch.mean_z()

        # monitor the bunch statistics (once per turn):
        bunchmonitor.dump(bunch)

        # if the centroid becomes unstable (>1cm motion)
        # then monitor the slice statistics:
        if not monitorswitch:
            if mx > 1e-2 or my > 1e-2 or i > n_turns - n_turns_slicemon:
                print("--> Activate slice monitor")
                monitorswitch = True
        else:
            if s_cnt < n_turns_slicemon:
                slicemonitor.dump(bunch)
                s_cnt += 1

        # stop the tracking as soon as we have not-a-number values:
        if not all(np.isfinite(c) for c in [ex, ey, ez, mx, my, mz]):
            print('*** STOPPING SIMULATION: non-finite bunch stats!')
            break

        # print status all 1000 turns:
        if i % 1000 == 0:
            t1 = time.perf_counter()
            print('Emittances: ({:.3g}, {:.3g}, {:.3g}) '
                  '& Centroids: ({:.3g}, {:.3g}, {:.3g})'
                  '@ turn {:d}, {:g} ms, {:s}'.format(
                      ex, ey, ez, mx, my, mz, i, (t1 - t0) * 1e3,
                      time.strftime("%d/%m/%Y %H:%M:%S", time.localtime())))

    print('\n*** Successfully completed!')