Example #1

# Common imports assumed by the examples below (pyORBIT conventions). Some
# examples also rely on pyORBIT helpers imported elsewhere (RungeKuttaTracker,
# laser-stripping classes, phaseNearTargetPhaseDeg, speed_of_light,
# resonance_lines, ...).
import math
import orbit_mpi
from orbit_mpi import mpi_comm, mpi_datatype, mpi_op
# several examples additionally use numpy, scipy and matplotlib:
import numpy as np
import scipy.io as sio
from scipy.stats import moment, kurtosis
import matplotlib.pyplot as plt
from matplotlib import gridspec


def BeamEmittances(bunch, beta):

    com = bunch.getMPIComm()
    mpi_size = orbit_mpi.MPI_Comm_size(com)
    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE

    N_part_loc = bunch.getSize()
    N_part_glob = bunch.getSizeGlobal()

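    # mean longitudinal momentum of the whole (MPI-distributed) bunch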
    P = 0
    for i in range(N_part_loc):
        P += bunch.pz(i)
    P = orbit_mpi.MPI_Allreduce(P, data_type, op, com)
    P = P / N_part_glob

    XP0 = 0
    YP0 = 0

    for i in range(N_part_loc):
        XP0 += bunch.px(i) / bunch.pz(i)
        YP0 += bunch.py(i) / bunch.pz(i)

    XP0 = orbit_mpi.MPI_Allreduce(XP0, data_type, op, com)
    YP0 = orbit_mpi.MPI_Allreduce(YP0, data_type, op, com)
    XP0 = XP0 / N_part_glob
    YP0 = YP0 / N_part_glob

    XP2 = 0
    YP2 = 0

    for i in range(N_part_loc):

        XP = bunch.px(i) / bunch.pz(i) - XP0
        YP = bunch.py(i) / bunch.pz(i) - YP0

        XP2 += XP * XP
        YP2 += YP * YP

    XP2 = orbit_mpi.MPI_Allreduce(XP2, data_type, op, com)
    YP2 = orbit_mpi.MPI_Allreduce(YP2, data_type, op, com)

    XP2 = XP2 / N_part_glob
    YP2 = YP2 / N_part_glob

    ex = XP2 * beta
    ey = YP2 * beta

    E = math.sqrt(P * P + bunch.mass() * bunch.mass())

    beta_rel = P / E
    gamma_rel = 1. / math.sqrt(1 - beta_rel * beta_rel)

    exn = ex * beta_rel * gamma_rel
    eyn = ey * beta_rel * gamma_rel

    return exn, eyn
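
A minimal usage sketch (not part of the original snippet): beta is the Twiss
beta function at the observation point, and the returned values are the
normalized rms divergence emittances.

from bunch import Bunch

bunch = Bunch()
bunch.mass(0.938272)  # proton mass in GeV
# fill the bunch on every rank: bunch.addParticle(x, xp, y, yp, z, dE)
exn, eyn = BeamEmittances(bunch, 1.0)  # beta value is illustrative
print "normalized emittances:", exn, eyn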
Example #2

    def _bin_coordinate(self, grid, u, comm):
        grid.setZero()
        # global min/max of the coordinate array over all MPI ranks
        Min = orbit_mpi.MPI_Allreduce(min(u), mpi_datatype.MPI_DOUBLE,
                                      mpi_op.MPI_MIN, comm)
        Max = orbit_mpi.MPI_Allreduce(max(u), mpi_datatype.MPI_DOUBLE,
                                      mpi_op.MPI_MAX, comm)
        grid_size = grid.getSizeZ()
        # pad the grid by 1.5 cells on each side so no particle falls outside
        delta = (Max - Min) / grid_size
        Min -= delta * 1.5
        Max += delta * 1.5
        grid.setGridZ(Min, Max)
        for value in u:
            grid.binValue(1, value)
        grid.synchronizeMPI()
Example #3

def LinearRestoringForce(bunch, force):

    rank = 0
    numprocs = 1

    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)

    # (the per-rank particle tally below is computed but never used here)
    nparts_arr_local = []
    for i in range(numprocs):
        nparts_arr_local.append(0)

    nparts_arr_local[rank] = bunch.getSize()
    data_type = mpi_datatype.MPI_INT
    op = mpi_op.MPI_SUM

    nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op, comm)

    for i in range(bunch.getSize()):
        en = bunch.dE(i)

        en = en + bunch.z(i) * force

        bunch.dE(i, en)

    return
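
A hedged usage sketch: the kick dE += z*force is applied once per call, so the
typical pattern is one call per turn during tracking (the force value below is
made up).

for turn in range(1000):
    # ... one-turn tracking of the bunch through the lattice goes here ...
    LinearRestoringForce(bunch, -2.0e-6)  # illustrative force [GeV/m]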
Example #4
def getKinEnergy(bunch):

    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE
    comm = mpi_comm.MPI_COMM_WORLD

    # sum pz locally, reduce over all ranks, then divide by the global
    # particle count (robust to unequal particle numbers per rank)
    pz = 0.
    for i in range(bunch.getSize()):
        pz += bunch.pz(i)
    pz = orbit_mpi.MPI_Allreduce(pz, data_type, op, comm)
    pz /= bunch.getSizeGlobal()

    Tk = math.sqrt(bunch.mass()**2 + pz**2) - bunch.mass()

    return Tk
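
Usage is a one-liner; the result is in GeV, like bunch.mass(). Comparing with
the synchronous particle is a quick sanity check (assuming the standard
SyncParticle.kinEnergy() accessor).

Tk = getKinEnergy(bunch)
print "mean kinetic energy [GeV] =", Tk
print "sync-particle kinetic energy [GeV] =", bunch.getSyncParticle().kinEnergy()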
Example #5

    def addParticleIdNumbers(b, fixedidnumber=-1, part_ind=0):

        rank = 0
        numprocs = 1

        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)
            numprocs = orbit_mpi.MPI_Comm_size(comm)

        nparts_arr_local = []
        for i in range(numprocs):
            nparts_arr_local.append(0)

        nparts_arr_local[rank] = b.getSize()
        data_type = mpi_datatype.MPI_INT
        op = mpi_op.MPI_SUM

        nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op,
                                             comm)

        if (b.hasPartAttr("ParticleIdNumber") == 0):
            b.addPartAttr("ParticleIdNumber")

        if (fixedidnumber >= 0):
            for i in range(part_ind, b.getSize()):
                b.partAttrValue("ParticleIdNumber", i, 0, fixedidnumber)

        else:
            # global offset = total number of particles on lower ranks
            istart = 0
            for i in range(rank):
                istart = istart + nparts_arr[i]

            for i in range(b.getSize()):
                idnumber = istart + i
                b.partAttrValue("ParticleIdNumber", i, 0, idnumber)
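
A sketch of the intended use (hypothetical, shown as a call from the enclosing
module): after generating a bunch, give every macroparticle a globally unique
id (0..N_global-1 across the MPI ranks) so particles can be traced through
dumps.

addParticleIdNumbers(bunch)
ids = [bunch.partAttrValue("ParticleIdNumber", i, 0)
       for i in range(bunch.getSize())]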
Example #6
    def analyzeBunch(self, bunch):
        """
		Here we assume that the macrosize is the same for each 
		particle
		"""
        comm = bunch.getMPIComm()
        self._cleanPhaseHist()
        self.x_avg = 0.
        self.y_avg = 0.
        self.phase_avg = 0.  # in deg
        self.phase_max = 0.  # in deg
        self.synch_phase = 0.  # in deg
        self.fourier_amp = 0.
        self.fourier_phase = 0.  # in deg
        self.amp = 0.
        nPartsGlobal = bunch.getSizeGlobal()
        if (nPartsGlobal == 0): return
        x_avg = 0.
        y_avg = 0.
        phase_avg = 0.
        beta = bunch.getSyncParticle().beta()
        z_to_phase = -360. * self.frequency / (speed_of_light * beta)
        synch_phase = 360. * bunch.getSyncParticle().time() * self.frequency
        self.synch_phase = phaseNearTargetPhaseDeg(synch_phase, 0.)
        nParts = bunch.getSize()
        for ind in range(nParts):
            x_avg += bunch.x(ind)
            y_avg += bunch.y(ind)
            phase_avg += z_to_phase * bunch.z(ind)
        #---- for parallel case
        (x_avg, y_avg, phase_avg) = orbit_mpi.MPI_Allreduce(
            (x_avg, y_avg, phase_avg), mpi_datatype.MPI_DOUBLE, mpi_op.MPI_SUM,
            comm)
        x_avg /= nPartsGlobal
        y_avg /= nPartsGlobal
        phase_avg /= nPartsGlobal
        phase2_avg = 0.
        for ind in range(nParts):
            phase2_avg += (z_to_phase * bunch.z(ind) - phase_avg)**2
        phase2_avg /= nPartsGlobal
        self.rms_phase = math.sqrt(phase2_avg)
        self.x_avg = x_avg
        self.y_avg = y_avg
        phase_avg += synch_phase
        self.phase_avg = phaseNearTargetPhaseDeg(phase_avg, 0.)
        #------- fill out histogram
        nHistP = len(self.phase_hist_arr)
        for ind in range(nParts):
            phase_ind = int(
                (180. + phaseNearTargetPhaseDeg(z_to_phase * bunch.z(ind), 0.))
                / self.phase_step)
            phase_ind = phase_ind % nHistP
            if (phase_ind < 0): phase_ind = 0
            if (phase_ind >= nHistP): phase_ind = nHistP - 1
            self.phase_hist_arr[phase_ind] += 1.0
        phase_hist_arr = orbit_mpi.MPI_Allreduce(self.phase_hist_arr,
                                                 mpi_datatype.MPI_DOUBLE,
                                                 mpi_op.MPI_SUM, comm)
        phase_hist_arr = list(phase_hist_arr)
        #---- find the position of the max value
        total_sum = 0.
        ind_max = -1
        max_val = 0
        for ind in range(nHistP):
            val = phase_hist_arr[ind]
            if (val > max_val):
                ind_max = ind
                max_val = val
            self.phase_hist_arr[ind] = val
            total_sum += val
        self.phase_max = (ind_max + 0.5) * self.phase_step
        self.phase_max -= 180.
        self.phase_max += synch_phase
        self.phase_max = phaseNearTargetPhaseDeg(self.phase_max, 0.)
        #---- calculate Fourier amplitude
        sin_sum = 0.
        cos_sum = 0.
        grad_to_rad_coeff = math.pi / 180.
        phase_step = self.phase_step * grad_to_rad_coeff
        #---- normalization
        n_local_coeff = 1. / (total_sum * phase_step)
        for ind in range(nHistP):
            phase_hist_arr[ind] *= n_local_coeff
        for ind in range(nHistP):
            self.phase_hist_arr[ind] = phase_hist_arr[ind]
        #--- Fourier amplitude and phase
        for ind in range(nHistP):
            val = phase_hist_arr[ind]
            phase = (ind + 0.5) * self.phase_step * grad_to_rad_coeff
            sin_sum += val * math.sin(phase)
            cos_sum += val * math.cos(phase)
        sin_sum *= self.phase_step * grad_to_rad_coeff
        cos_sum *= self.phase_step * grad_to_rad_coeff
        self.fourier_amp = math.sqrt(sin_sum**2 + cos_sum**2) / math.pi
        self.amp = self.norm_coeff * self.fourier_amp
        self.fourier_phase = -math.atan2(cos_sum,
                                         sin_sum) * 180. / math.pi - 90.
        self.fourier_phase += synch_phase
        self.fourier_phase = phaseNearTargetPhaseDeg(self.fourier_phase, 0.)
Example #7

	tracker = RungeKuttaTracker(1000.0)
	First = DensityMatrix(Stark,10000.,LFS)
	fS=LSFieldSource(0.,0.,0.,Bx,0.,0.)
	tracker.track(bunch_target,0,time_step*n_step, time_step,fS,First)
	bunch_target.dumpBunch("bunch_res"+str(count)+".dat")
	population = 0.
	population2 = 0.
	for i in range(bunch_target.getSize()):
		val = (1-bunch_target.partAttrValue("Amplitudes",i,1))
		population += val
		population2 += val*val
	
	mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)
	op = mpi_op.MPI_SUM
	data_type = mpi_datatype.MPI_DOUBLE
	population = orbit_mpi.MPI_Allreduce(population,data_type,op,mpi_comm.MPI_COMM_WORLD)
	population2 = orbit_mpi.MPI_Allreduce(population2,data_type,op,mpi_comm.MPI_COMM_WORLD)
	population = population/(mpi_size*N_part)
	sigma_pop = 0.
	if(N_part*mpi_size > 1):
		sigma_pop = math.sqrt((population2 - N_part*mpi_size*population*population)/(N_part*mpi_size*(N_part*mpi_size - 1)))
	time_live = orbit_mpi.MPI_Wtime() - time_start
	res = " %6.0f  %4.1f  %4.1f  %4.1f    %6.3f  %6.3f "%(time_live,power/1.0e+6,1.0e+6*wx,fxy*100,population,sigma_pop)
	if(rank == 0): print "W [MW]= %4.1f  wx [um] = %4.1f  dist[cm]= %4.1f  Population: %7.3f +-  %7.3f"%(power/1.0e+6,1.0e+6*wx,fxy*100,population,sigma_pop)
	if(rank == 0):
		f_out = open(file_name,"a")
		f_out.write(str(count)+" "+res + "\n")
		f_out.close()
#graph = mygra.PlotPopl(population)
Example #8

def saveBunchAsMatfile(bunch, filename=None):

    b = bunch
    #take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of length equal to the number of CPUs;
    # it holds the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[]
                                for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(
                lambda i: b.partAttrValue(attribute, i, j), mp_array)

    #This is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                #get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(
                        np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu,
                                               222, comm)))
            elif (rank == i_cpu):
                #send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE,
                                       main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    #get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(
                            np.atleast_1d(
                                orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE,
                                                   i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    #send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j],
                                           mpi_datatype.MPI_DOUBLE, main_rank,
                                           222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(), \
           'charge': bunch.charge(),
           'mass': bunch.mass(), \
           'momentum': bunch.getSyncParticle().momentum(), \
           'beta': bunch.getSyncParticle().beta(), \
           'gamma': bunch.getSyncParticle().gamma(), \
           'time': bunch.getSyncParticle().time()}

    if filename:
        if rank == main_rank:
            sio.savemat(filename + '.mat', {
                'particles': particles,
                'bunchparameters': bunchparameters
            },
                        do_compression=True)
    orbit_mpi.MPI_Barrier(comm)
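
A usage sketch: call on all ranks (only the main rank writes the file), then
inspect the result with scipy.

saveBunchAsMatfile(bunch, filename="bunch_turn_0")  # writes bunch_turn_0.mat
data = sio.loadmat("bunch_turn_0.mat")
print data["bunchparameters"]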
Example #9
def BunchGather(bunch, turn, p, plot_footprint=False):

    b = bunch
    verbose = False

    # take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of length equal to the number of CPUs;
    # it holds the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    if verbose:
        print 'BunchMoments:: bunch_size on MPI Rank: ', rank, ' = ', n_parts_arr[rank]
        print 'BunchMoments:: n_parts_arr on MPI Rank: ', rank, ' = ', n_parts_arr

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[]
                                for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(
                lambda i: b.partAttrValue(attribute, i, j), mp_array)

    # This is just in case. Actually, MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                # get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(
                        np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu,
                                               222, comm)))
            elif (rank == i_cpu):
                # send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE,
                                       main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    # get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(
                            np.atleast_1d(
                                orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE,
                                                   i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    # send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j],
                                           mpi_datatype.MPI_DOUBLE, main_rank,
                                           222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(), \
           'charge': bunch.charge(),
           'mass': bunch.mass(), \
           'momentum': bunch.getSyncParticle().momentum(), \
           'beta': bunch.getSyncParticle().beta(), \
           'gamma': bunch.getSyncParticle().gamma(), \
           'time': bunch.getSyncParticle().time()}

    ########################################################################
    #                Plot tune footprint with histograms                   #
    ########################################################################

    if rank == main_rank:
        # ~ print 'Rank: ', rank
        if turn >= 0:
            # ~ print 'Turn: ', turn
            if plot_footprint:
                if verbose:
                    print 'BunchGather:: Plot tune footprint on rank', rank

                tunex = p['tunex'][0] + '.' + p['tunex'][1:]
                tuney = p['tuney'][0] + '.' + p['tuney'][1:]
                tunex_sav = p['tunex'][0] + 'p' + p['tunex'][1:]
                tuney_sav = p['tuney'][0] + 'p' + p['tuney'][1:]
                fontsize = 15

                qx = np.array(particles['ParticlePhaseAttributes'][2])
                qy = np.array(particles['ParticlePhaseAttributes'][3])

                qx[np.where(qx > 0.5)] -= 1
                qy[np.where((qy > 0.6) & (qx < 0.25))] -= 1

                print 'resonances'
                resonances = resonance_lines((5.75, 6.25), (5.75, 6.25),
                                             (1, 2, 3, 4), 10)
                fontsize = 17

                f, ax = plt.subplots(1, figsize=(6, 6))
                gridspec.GridSpec(3, 3)
                #f.subplots_adjust(hspace = 0)  # vertical spacing between subplots
                f.subplots_adjust(
                    wspace=0)  # horizontal spacing between subplots

                my_cmap = plt.cm.jet
                my_cmap.set_under('w', 1)

                r = resonances

                print 'title'
                title = str(tunex_sav + ' ' + tuney_sav + ' turn ' + str(turn))

                # First subplot
                print 'plot1'
                plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=1)
                plt.hist(6 + qx, bins=1000,
                         range=(r.Qx_min,
                                r.Qx_max))  #, norm=mcolors.PowerNorm(gamma))
                plt.ylabel('Frequency')
                plt.grid(which='both')
                plt.title(title, fontsize=fontsize)

                # Main plot
                print 'plot2'
                plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
                plt.hist2d(6 + qx,
                           6 + qy,
                           bins=1000,
                           cmap=my_cmap,
                           vmin=1,
                           range=[[r.Qx_min, r.Qx_max], [r.Qy_min, r.Qy_max]
                                  ])  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel(r'Q$_x$')
                plt.ylabel(r'Q$_y$')

                print 'plot_resonance'
                resonances.plot_resonance(f)

                # Second subplot
                print 'plot3'
                plt.subplot2grid((3, 3), (1, 2), colspan=1, rowspan=2)
                plt.hist(6 + qy,
                         bins=1000,
                         range=(r.Qy_min, r.Qy_max),
                         orientation=u'horizontal'
                         )  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel('Frequency')
                plt.grid(which='both')

                current_axis = plt.gca()
                #current_axis.axes.get_yaxis().set_visible(False)

                ax.xaxis.label.set_size(fontsize)
                ax.yaxis.label.set_size(fontsize)
                ax.tick_params(labelsize=fontsize)

                plt.tight_layout()
                savename = str('Tune_Footprints/' + tunex_sav + '_' +
                               tuney_sav + '_turn_' + str(turn) + '_hist.png')

                print 'savefig'
                f.savefig(savename, dpi=100)
                plt.close(f)

    outputs = dict()
    if rank == main_rank:
        x = np.array(particles['x'])
        xp = np.array(particles['xp'])
        y = np.array(particles['y'])
        yp = np.array(particles['yp'])
        z = np.array(particles['z'])
        dE = np.array(particles['dE'])

        mu_x = moment(x, 2)
        mu_xp = moment(xp, 2)
        mu_y = moment(y, 2)
        mu_yp = moment(yp, 2)
        mu_z = moment(z, 2)
        mu_dE = moment(dE, 2)

        sig_x = np.sqrt(mu_x)
        sig_xp = np.sqrt(mu_xp)
        sig_y = np.sqrt(mu_y)
        sig_yp = np.sqrt(mu_yp)
        sig_z = np.sqrt(mu_z)
        sig_dE = np.sqrt(mu_dE)

        x_6_sig = x[np.where((x >= -6 * sig_x) & (x <= 6 * sig_x))]
        xp_6_sig = xp[np.where((xp >= -6 * sig_xp) & (xp <= 6 * sig_xp))]
        y_6_sig = y[np.where((y >= -6 * sig_y) & (y <= 6 * sig_y))]
        yp_6_sig = yp[np.where((yp >= -6 * sig_yp) & (yp <= 6 * sig_yp))]
        z_6_sig = z[np.where((z >= -6 * sig_z) & (z <= 6 * sig_z))]
        dE_6_sig = dE[np.where((dE >= -6 * sig_dE) & (dE <= 6 * sig_dE))]

        # Later add something to cut large amplitude particles to reduce noise for kurtosis calculation
        outputs = {
            'Mu_x': mu_x,
            'Mu_xp': mu_xp,
            'Mu_y': mu_y,
            'Mu_yp': mu_yp,
            'Mu_z': mu_z,
            'Mu_dE': mu_dE,
            'Sig_x': sig_x,
            'Sig_xp': sig_xp,
            'Sig_y': sig_y,
            'Sig_yp': sig_yp,
            'Sig_z': sig_z,
            'Sig_dE': sig_dE,
            'Max_x': np.max(x),
            'Max_xp': np.max(xp),
            'Max_y': np.max(y),
            'Max_yp': np.max(yp),
            'Max_z': np.max(z),
            'Max_dE': np.max(dE),
            'Min_x': np.min(x),
            'Min_xp': np.min(xp),
            'Min_y': np.min(y),
            'Min_yp': np.min(yp),
            'Min_z': np.min(z),
            'Min_dE': np.min(dE),
            'Kurtosis_x': kurtosis(x, fisher=True, nan_policy='omit'),
            'Kurtosis_xp': kurtosis(xp, fisher=True, nan_policy='omit'),
            'Kurtosis_y': kurtosis(y, fisher=True, nan_policy='omit'),
            'Kurtosis_yp': kurtosis(yp, fisher=True, nan_policy='omit'),
            'Kurtosis_z': kurtosis(z, fisher=True, nan_policy='omit'),
            'Kurtosis_dE': kurtosis(dE, fisher=True, nan_policy='omit'),
            'Kurtosis_x_6sig': kurtosis(x_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_xp_6sig': kurtosis(xp_6_sig,
                                         fisher=True,
                                         nan_policy='omit'),
            'Kurtosis_y_6sig': kurtosis(y_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_yp_6sig': kurtosis(yp_6_sig,
                                         fisher=True,
                                         nan_policy='omit'),
            'Kurtosis_z_6sig': kurtosis(z_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_dE_6sig': kurtosis(dE_6_sig,
                                         fisher=True,
                                         nan_policy='omit')
        }

    return outputs
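
A hedged calling sketch: p carries the machine tunes as digit strings (the
plotting branch renders '622' as '6.22'), and the returned dictionary is
non-empty only on the main rank.

p = {'tunex': '622', 'tuney': '624'}  # illustrative values
moments = BunchGather(bunch, 100, p, plot_footprint=False)
if moments:
    print "Sig_x =", moments['Sig_x'], " Kurtosis_x =", moments['Kurtosis_x']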
Example #10
    def population(self):

        bunch_target = self.bunch_target
        mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)

        bunch = self.bunch
        N_part = bunch.getSize()
        TK = self.TK
        n_step = self.n_step
        power = self.power
        n_states = self.n_states
        cut_par = self.cut_par
        sigmaZ_beam = self.sigmaZ_beam
        env_sigma = self.env_sigma
        Nevol = self.Nevol
        print_file = self.print_file 
        
        if (self.method == 2):
            St = self.St
        if (self.method == 3):
            StSf = self.StSf
        if (self.method == 4):
            continuum_spectr = self.continuum_spectr
        
        fS = self.fS
        la = self.la
        fx = self.fx
        fy = self.fy
        wx = self.wx
        wy = self.wy 
               
        rx = self.rx
        ry = self.ry
        ax = self.ax
        ay = self.ay
        
        method = self.method
        Bx = self.Bx
        By = self.By
        Bz = self.Bz
        ####### The parameters of the function are defined above ###############

        E = bunch.mass() + TK
        P = math.sqrt(E*E - bunch.mass()*bunch.mass())
        vz = 299792458*P/E
        
        fS = ConstEMfield(0.,0.,0.,Bx,0.,0.)                                  
                    
        bunch_target.deleteAllParticles()
        bunch.copyBunchTo(bunch_target)
        
        
        if (method == 1):
            dip_transition = math.sqrt(256*math.pow(n_states,7)*math.pow(n_states-1,2*n_states-5)/3/math.pow(n_states+1,2*n_states+5))
            delta_E = 1./2. - 1./(2.*n_states*n_states)
            
        if (method == 2):
            delta_E = 1./2. - 1./(2.*n_states*n_states)      
            
        if (method == 3):
            delta_E = StSf.deltaE(bunch.mass(),0.,0.,0.,Bx,By,Bz,0.,0.,P)
            
        if (method == 4):
            delta_E = continuum_spectr.setField_returndE(bunch.mass(),0.,0.,0.,Bx,By,Bz,0.,0.,P)
        
        
        la0 = 2*math.pi*5.291772108e-11/7.297352570e-3/delta_E
        te = TK - bunch.mass()*(la/la0-1)
        kz = te/math.sqrt(P*P-te*te)
        #kz = math.tan(2*math.pi*(39.23-90)/360)
        print "angle = ",math.atan2(1,-kz)*360/2/math.pi, "kz = ", kz
        #kz = -1.22020387566
        #print sigmaZ_beam

        zb = -5*sigmaZ_beam
        zl = zb*E/P
        time_step = (2*abs(zb)/vz)/n_step

        for i in range(N_part):
            z = bunch_target.z(i)
            bunch_target.z(i,z + zb)   
        #bunch_target.dumpBunch("bunch_ini"+str(count)+".dat")
        LFS = HermiteGaussianLFmode(math.sqrt(power),0,0,abs(wx), abs(wy),fx,fy,la,zl,env_sigma)
        LFS.setLocalParameters(abs(rx), abs(ry),ax,ay)
        
        LFS.setLaserFieldOrientation(0.,0.,0.,   -1.,0.,kz,   kz,0.,1.,  kz,0.,1.)    #perpendicular polarization
#        LFS.setLaserFieldOrientation(0.,0.,0.,   -1.,0.,kz,   kz,0.,1.,  0.,1.,0.)      #parallel polarization
        tracker = RungeKuttaTracker(0)
        
        if (method == 1):   eff = TwoLevelAtom(LFS,delta_E,dip_transition)
        if (method == 2):   eff = SchrodingerEquation(LFS,St,cut_par)
        if (method == 3):   eff = TwoLevelStrongField(LFS, StSf)
        if (method == 4):   eff = ContinuumSS(LFS,continuum_spectr)
      
        cont_eff = ExtEffectsContainer()  
        cont_eff.AddEffect(eff)
        
        if(print_file):
            pr = PrintExtEffects("Populations",n_step,os.environ["ORBIT_ROOT"]+"/ext/laserstripping/working_dir/"+"/data3.0")
            cont_eff.AddEffect(pr)
        if(Nevol != 0):
            evo = RecordEvolution("Populations",0,Nevol)
            cont_eff.AddEffect(evo)
        
        tracker.track(bunch_target,0,time_step*n_step, time_step,fS,cont_eff)
        
#        for i in range(N_part):
#            z = bunch_target.z(i)
#            bunch_target.z(i,z - vz*time_step*n_step) 
#        tracker.track(bunch_target,0,time_step*n_step, time_step,fS,cont_eff)

        population = 0.
        population2 = 0.   
        p_ioniz = 0.
        p_ioniz2 = 0.
        for i in range(N_part):
            
            if (method != 4):
                val = 1 - bunch_target.partAttrValue("Populations",i,0) - bunch_target.partAttrValue("Populations",i,1)
                p_val = bunch_target.partAttrValue("Populations",i,0)
                population += val
                population2 += val*val
                p_ioniz += p_val
                p_ioniz2 += p_val*p_val
            else:
                val = 1 - bunch_target.partAttrValue("Populations",i,0)
                p_val = 1 - bunch_target.partAttrValue("Populations",i,0)
                population += val
                population2 += val*val
                p_ioniz += p_val
                p_ioniz2 += p_val*p_val
                
            
        op = mpi_op.MPI_SUM
        data_type = mpi_datatype.MPI_DOUBLE
        population = orbit_mpi.MPI_Allreduce(population,data_type,op,mpi_comm.MPI_COMM_WORLD)
        population2 = orbit_mpi.MPI_Allreduce(population2,data_type,op,mpi_comm.MPI_COMM_WORLD)
        p_ioniz = orbit_mpi.MPI_Allreduce(p_ioniz,data_type,op,mpi_comm.MPI_COMM_WORLD)
        p_ioniz2 = orbit_mpi.MPI_Allreduce(p_ioniz2,data_type,op,mpi_comm.MPI_COMM_WORLD)
        population = population/(mpi_size*N_part)
        p_ioniz = p_ioniz/(mpi_size*N_part)
        sigma_pop = 0.
        sigma_p_ioniz = 0.
        if(N_part*mpi_size > 1):
            sigma_pop = math.sqrt((population2 - N_part*mpi_size*population*population)/(N_part*mpi_size*(N_part*mpi_size - 1)))
            sigma_p_ioniz = math.sqrt((p_ioniz2 - N_part*mpi_size*p_ioniz*p_ioniz)/(N_part*mpi_size*(N_part*mpi_size - 1)))

        return population, sigma_pop, p_ioniz, sigma_p_ioniz
Example #11

# (fragment) initialize the per-coordinate accumulators used below
x_sum = 0.
xp_sum = 0.
y_sum = 0.
yp_sum = 0.
phi_sum = 0.
dE_sum = 0.

for i in range(b.getSize()):
    x = b.x(i)
    xp = b.xp(i)
    y = b.y(i)
    yp = b.yp(i)
    z = b.z(i)
    dE = b.dE(i)
    x_sum += x
    xp_sum += xp
    y_sum += y
    yp_sum += yp
    phi_sum += z
    dE_sum += dE

var_arr = (x_sum, xp_sum, y_sum, yp_sum, phi_sum, dE_sum)
var_arr = orbit_mpi.MPI_Allreduce(var_arr, mpi_datatype.MPI_DOUBLE,
                                  mpi_op.MPI_SUM, comm)
(x_sum, xp_sum, y_sum, yp_sum, phi_sum, dE_sum) = var_arr
if (rank == 0):
    print "================ parallel ============="
    print "x_sum   =", x_sum
    print "xp_sum  =", xp_sum
    print "y_sum   =", y_sum
    print "yp_sum  =", yp_sum
    print "phi_sum =", phi_sum
    print "dE_sum  =", dE_sum

if (rank == 0):
    x_sum = 0.
    xp_sum = 0.
    y_sum = 0.
    yp_sum = 0.
Example #12
def Freq_spread(bunch, la, n):

    delta_E = 1. / 2. - 1. / (2. * n * n)
    m = bunch.mass()

    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE
    mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)

    N = bunch.getSize()
    pz = 0
    for i in range(N):
        pz += bunch.pz(i)
    pz = orbit_mpi.MPI_Allreduce(pz, data_type, op, mpi_comm.MPI_COMM_WORLD)
    pz = pz / (mpi_size * N)

    E = math.sqrt(pz * pz + m * m)
    TK = E - m

    la0 = 2 * math.pi * 5.291772108e-11 / 7.297352570e-3 / delta_E
    te = TK - m * (la / la0 - 1)
    kzz = te / math.sqrt(pz * pz - te * te)

    kx = -1.
    ky = 0.
    kz = kzz

    om = 0
    om2 = 0

    for i in range(N):

        px = bunch.px(i)
        py = bunch.py(i)
        pz = bunch.pz(i)

        P2 = px * px + py * py + pz * pz
        K = math.sqrt(kx * kx + ky * ky + kz * kz)
        P = math.sqrt(P2)

        E = math.sqrt(P2 + m * m)

        beta = P / E
        gamma = E / m

        cos = (px * kx + py * ky + pz * kz) / (K * P)

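        # relativistic Doppler: la0 = la/(gamma*(1 - beta*cos)) is the
        # wavelength in the particle rest frame; omega = 2*pi*a0/alpha/la0 is
        # the matching photon energy in atomic units (a0 = Bohr radius [m],
        # alpha = fine-structure constant)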
        la0 = la / (gamma * (1 - beta * cos))
        omega = 2 * math.pi * 5.291772108e-11 / 7.297352570e-3 / la0

        om += omega
        om2 += omega * omega

#        f = open('bunch_parameters.txt','a')
#        print >>f, omega
#        f.close()

    # note: dividing by mpi_size*N assumes every rank holds N particles
    om = orbit_mpi.MPI_Allreduce(om, data_type, op, mpi_comm.MPI_COMM_WORLD)
    om = om / (mpi_size * N)

    om2 = orbit_mpi.MPI_Allreduce(om2, data_type, op, mpi_comm.MPI_COMM_WORLD)
    om2 = om2 / (mpi_size * N)

    sigma_om = math.sqrt(om2 - om**2)

    return (om, sigma_om)
Example #13
def xyBeamEmittances(bunch):

    com = bunch.getMPIComm()
    mpi_size = orbit_mpi.MPI_Comm_size(com)
    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE

    N_part_loc = bunch.getSize()
    N_part_glob = bunch.getSizeGlobal()

    P = 0
    for i in range(N_part_loc):
        P += bunch.pz(i)
    P = orbit_mpi.MPI_Allreduce(P, data_type, op, com)
    P = P / N_part_glob

    XP0 = 0
    YP0 = 0
    X0 = 0
    Y0 = 0
    for i in range(N_part_loc):
        XP0 += bunch.px(i) / bunch.pz(i)
        YP0 += bunch.py(i) / bunch.pz(i)
        X0 += bunch.x(i)
        Y0 += bunch.y(i)
    XP0 = orbit_mpi.MPI_Allreduce(XP0, data_type, op, com)
    YP0 = orbit_mpi.MPI_Allreduce(YP0, data_type, op, com)
    X0 = orbit_mpi.MPI_Allreduce(X0, data_type, op, com)
    Y0 = orbit_mpi.MPI_Allreduce(Y0, data_type, op, com)
    XP0 = XP0 / N_part_glob
    YP0 = YP0 / N_part_glob
    X0 = X0 / N_part_glob
    Y0 = Y0 / N_part_glob

    XP2 = 0
    YP2 = 0
    X2 = 0
    Y2 = 0
    PXP = 0
    PYP = 0
    for i in range(N_part_loc):

        XP = bunch.px(i) / bunch.pz(i) - XP0
        YP = bunch.py(i) / bunch.pz(i) - YP0
        X = bunch.x(i) - X0
        Y = bunch.y(i) - Y0

        XP2 += XP * XP
        YP2 += YP * YP
        X2 += X * X
        Y2 += Y * Y
        PXP += XP * X
        PYP += YP * Y

    XP2 = orbit_mpi.MPI_Allreduce(XP2, data_type, op, com)
    YP2 = orbit_mpi.MPI_Allreduce(YP2, data_type, op, com)
    X2 = orbit_mpi.MPI_Allreduce(X2, data_type, op, com)
    Y2 = orbit_mpi.MPI_Allreduce(Y2, data_type, op, com)
    PXP = orbit_mpi.MPI_Allreduce(PXP, data_type, op, com)
    PYP = orbit_mpi.MPI_Allreduce(PYP, data_type, op, com)
    XP2 = XP2 / N_part_glob
    YP2 = YP2 / N_part_glob
    X2 = X2 / N_part_glob
    Y2 = Y2 / N_part_glob
    PXP = PXP / N_part_glob
    PYP = PYP / N_part_glob

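    # rms emittance from second moments: e = sqrt(<u^2><u'^2> - <u u'>^2)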
    ex = math.sqrt(X2 * XP2 - PXP * PXP)
    ey = math.sqrt(Y2 * YP2 - PYP * PYP)

    E = math.sqrt(P * P + bunch.mass() * bunch.mass())

    beta_rel = P / E
    gamma_rel = 1. / math.sqrt(1 - beta_rel * beta_rel)

    exn = ex * beta_rel * gamma_rel
    eyn = ey * beta_rel * gamma_rel
    print "beta_rel*gamma_rel = ", beta_rel * gamma_rel

    return exn, eyn
Example #14
def profiles(Bunch, coord, histogram, steps=100, Min=1.0, Max=-1.0):
    """
        Returns a profile for one of the following Bunch coordinates:
	x[m] xp[rad] y[m] yp[rad] z[m] dE[GeV]
	"""

    b = Bunch

    # Take the MPI Communicator from bunch: It could be
    # different from MPI_COMM_WORLD

    comm = Bunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of size of the number of CPUs,
    # contains the number of macroparticles on each CPU

    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, \
                   mpi_datatype.MPI_INT,mpi_op.MPI_SUM,comm)

    partdat = []

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            if coord == "x":
                partdat.append(b.x(i))
            if coord == "px":
                partdat.append(b.px(i))
            if coord == "y":
                partdat.append(b.y(i))
            if coord == "py":
                partdat.append(b.py(i))
            if coord == "z":
                partdat.append(b.z(i))
            if coord == "dE":
                partdat.append(b.dE(i))

    # That is just in case.
    # Actually, the MPI_Barrier command is not necessary.

    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

    for i_cpu in range(1, size):

        # Again, that is just in case.
        # Actually, the MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)

        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):

                #get the coordinate array
                (x, px, y, py, z, dE) = \
                                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, \
                                            i_cpu, 222, comm)
                if coord == "x":
                    partdat.append(x)
                if coord == "px":
                    partdat.append(px)
                if coord == "y":
                    partdat.append(y)
                if coord == "py":
                    partdat.append(py)
                if coord == "z":
                    partdat.append(z)
                if coord == "dE":
                    partdat.append(dE)

            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i)
                px = b.px(i)
                y = b.y(i)
                py = b.py(i)
                z = b.z(i)
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, \
                                            mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    # only the main rank collected the full coordinate list
    if (rank != main_rank):
        return

    l = len(partdat)
    m = min(partdat)
    M = max(partdat)

    c = (M + m) / 2.0
    d = (M - m) * 1.1 / 2.0
    M = c + d
    m = c - d

    # user-supplied Min/Max can only widen the range (the defaults never trigger)
    if Max > M:
        M = Max
    if Min < m:
        m = Min

    dx = (M - m) / steps

    grid = [m]
    prof = [0]
    for i in range(1, steps + 1):
        x = m + i * dx
        grid.append(x)
        prof.append(0)
    grid.append(M)
    prof.append(0)

    for n in range(l):
        i = int((partdat[n] - m) / dx)
        if i < 0 or i > steps:
            # outside the histogram range
            pass
        else:
            # split the count linearly between the two neighboring bins
            frac = (partdat[n] - m) / dx % 1
            prof[i] = prof[i] + (1.0 - frac)
            prof[i + 1] = prof[i + 1] + frac

    hist_sum = 0.0
    for i in range(steps + 1):
        hist_sum = hist_sum + prof[i]

    file_out = histogram
    if (rank == main_rank):
        file_out = open(histogram, "w")

        file_out.write("Min = " + str(m) + "  Max = " + \
                       str(M) + " steps = " + str(steps) + "\n")
        file_out.write("nParts = " + str(l) + " HistSum = " + \
                       str(sum) + "\n\n")

        for i in range(steps + 1):
            file_out.write(str(grid[i]) + "   " + \
                           str(prof[i]) + "\n")

    if (rank == main_rank):
        file_out.close()
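
A usage sketch (file name and bin count are illustrative): histogram the
horizontal coordinate and write the profile on the main rank.

profiles(bunch, "x", "x_profile.dat", steps=200)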
Example #15
def bunch_pyorbit_to_orbit_nHarm(ringLength, nHarm, pyOrbitBunch, \
 name_of_orbit_mpi_bunch_file):
    """
	Translates pyORBIT bunch to ORBIT_MPI bunch, incorporating RF
	harmonic number, and dumps it into a file.
	The ring length should be defined in the input (in meters).
	Lines in bunch files:
	ORBIT_MPI: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV].
	pyORBIT:   x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
	"""
    pi2 = 2.0 * math.pi
    zfac = pi2 * nHarm / ringLength
    b = pyOrbitBunch
    # Take the MPI Communicator from bunch: it could be different
    # from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of length equal to the number of CPUs;
    # it holds the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, \
     mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            z = -(math.fmod(b.z(i) * zfac, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(str(x) + " " + str(px) + " " + \
             str(y) + " " + str(py) + " "+ \
             str(z) + " " + str(dE) + "\n")

    # MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        #Again, MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                # Get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(\
                 mpi_datatype.MPI_DOUBLE, \
                 i_cpu, 222, comm)
                file_out.write(str(x) + " " + str(px) + \
                 " " + str(y) + " " + str(py) + \
                 " " + str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * zfac, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, \
                 mpi_datatype.MPI_DOUBLE, \
                 main_rank, 222, comm)

    if (rank == main_rank):
        file_out.close()
Example #16

def bunch_pyorbit_to_orbit(ringLength, pyOrbitBunch,
                           name_of_orbit_mpi_bunch_file):
    """
	Translates pyORBIT bunch to ORBIT_MPI bunch and dumps it into the file.
	The ring length should be defined in the input (in meters).
	ORBIT_MPI file has lines: x[mm] xp[mrad] y[mm] yp[mrad]   phi[rad]  dE[GeV].
	pyORBIT: x[m] xp[rad] y[m] yp[rad]  z[m]  dE[GeV]
	"""
    pi2 = 2.0 * math.pi
    L = ringLength
    b = pyOrbitBunch
    #take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of length equal to the number of CPUs;
    # it holds the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            z = -(math.fmod(b.z(i) * pi2 / L, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(
                str(x) + " " + str(px) + " " + str(y) + " " + str(py) + " " +
                str(z) + " " + str(dE) + "\n")

    #That is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        #Again, that is just in case. Actually, the MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                #get the coordinate array
                (x, px, y, py, z,
                 dE) = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222,
                                          comm)
                file_out.write(
                    str(x) + " " + str(px) + " " + str(y) + " " + str(py) +
                    " " + str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * pi2 / L, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, main_rank,
                                   222, comm)

    if (rank == main_rank):
        file_out.close()
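
A closing usage sketch, with an illustrative ring circumference of 248 m:

bunch_pyorbit_to_orbit(248.0, bunch, "bunch_orbit.dat")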