# Imports required by the functions collected below.  numpy/scipy/matplotlib are
# used directly; the PyORBIT imports use the usual Python 2 PyORBIT module paths.
# The remaining helpers (DoubleRF, LongitudinalBinomialDistribution,
# LongitudinalJohoDistributionSingleHarmonic, LongitudinalDistributionFromTomoscope,
# resonance_lines, metaclass, _GaussianFit, _Gaussian_sigma_from_FWHM) are
# project-local and are assumed to be importable from the surrounding package.
import math
import csv

import numpy as np
import scipy.io as sio
from scipy.constants import speed_of_light
from scipy.stats import moment, kurtosis
import matplotlib.pyplot as plt
from matplotlib import gridspec

import orbit_mpi
from orbit_mpi import mpi_datatype, mpi_op
from bunch import Bunch
from orbit.bunch_generators import TwissContainer, GaussDist2D, KVDist1D


def bunch_from_matfile(matfile):
    d = sio.loadmat(matfile, squeeze_me=True)
    p = dict((key, value) for (key, value) in map(
        lambda k: (k, d['particles'][k][()]), d['particles'].dtype.names))
    attributes = list(set(p) - set(['x', 'xp', 'y', 'yp', 'z', 'dE']))
    attributes.sort(key=str.lower)

    bunch = Bunch()
    bunch.classicalRadius(d['bunchparameters']['classical_radius'])
    bunch.charge(d['bunchparameters']['charge'])
    bunch.mass(d['bunchparameters']['mass'])
    bunch.getSyncParticle().momentum(d['bunchparameters']['momentum'])
    bunch.getSyncParticle().time(d['bunchparameters']['time'])

    x = np.atleast_1d(d['particles']['x'][()])
    xp = np.atleast_1d(d['particles']['xp'][()])
    y = np.atleast_1d(d['particles']['y'][()])
    yp = np.atleast_1d(d['particles']['yp'][()])
    z = np.atleast_1d(d['particles']['z'][()])
    dE = np.atleast_1d(d['particles']['dE'][()])
    n_part = len(x)

    import orbit_mpi
    comm = bunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)

    count = n_part // size  # integer number of particles handled by each MPI rank
    remainder = n_part % size
    if (rank < remainder):
        i_start = rank * (count + 1)
        i_stop = i_start + count + 1
    else:
        i_start = rank * count + remainder
        i_stop = i_start + count
    # print rank, i_start, i_stop

    map(lambda i: bunch.addParticle(x[i], xp[i], y[i], yp[i], z[i], dE[i]),
        xrange(i_start, i_stop))
    orbit_mpi.MPI_Barrier(comm)
    for a in attributes:
        bunch.addPartAttr(a)
        a_size = bunch.getPartAttrSize(a)
        if a_size > 1:
            for j in xrange(a_size):
                map(
                    lambda
                    (ip, i): bunch.partAttrValue(a, ip, j,
                                                 np.atleast_1d(p[a][j])[i]),
                    enumerate(xrange(i_start, i_stop)))
        else:
            map(
                lambda (ip, i): bunch.partAttrValue(a, ip, 0,
                                                    np.atleast_1d(p[a])[i]),
                enumerate(xrange(i_start, i_stop)))
    orbit_mpi.MPI_Barrier(comm)
    return bunch
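# Hedged usage sketch: round-trip a bunch through a .mat file.  The file stem
# 'bunch_dump' is illustrative only; saveBunchAsMatfile (defined further down)
# appends '.mat' to the stem it is given, while bunch_from_matfile expects the
# full file name.
def _sketch_matfile_roundtrip(bunch, stem='bunch_dump'):
    saveBunchAsMatfile(bunch, stem)           # rank 0 writes stem + '.mat'
    return bunch_from_matfile(stem + '.mat')  # all ranks re-read and re-distribute the particles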
# Fragment: inner closure of a "run only on MPI rank 0" decorator; `func` is the
# wrapped callable supplied by the enclosing decorator, which is not shown here.
 def call(*args, **kwargs):
     comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
     rank = orbit_mpi.MPI_Comm_rank(comm)
     if not rank:
         result = func(*args, **kwargs)
     else:
         result = None
     orbit_mpi.MPI_Barrier(comm)
     return result
def write_SextupoleRamp_files(target_file, pattern, ptc_source_table):
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    if not rank:
        with open(target_file, 'w') as fid:
            fid.write('SET ORBIT RAMPING \n')
            fid.write(' ramp\n %s\t"%s"\t%1.9f \n' %
                      (pattern, ptc_source_table, 1.0))
            fid.write('return')
    orbit_mpi.MPI_Barrier(comm)
# Fragment: a second copy of the same rank-0-only closure, with debug prints left
# commented out (see the decorator sketch below).
 def call(*args, **kwargs):
     comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
     rank = orbit_mpi.MPI_Comm_rank(comm)
     #print 'rank %i before executing the function'%rank
     if not rank:
         #print 'rank %i executing the function'%rank
         result = func(*args, **kwargs)
     else:
         result = None
     #print 'rank %i before the barrier'%rank
     orbit_mpi.MPI_Barrier(comm)
     #print 'rank %i after the barrier'%rank
     return result
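# Hedged sketch: the `call` closures above are the inner functions of a
# "run only on MPI rank 0" decorator; `func` is supplied by the enclosing
# decorator, which is not included in this collection.  The decorator name
# below is an assumption, not taken from the original source.
def _only_main_rank_sketch(func):
    def call(*args, **kwargs):
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
        if not orbit_mpi.MPI_Comm_rank(comm):
            result = func(*args, **kwargs)
        else:
            result = None
        orbit_mpi.MPI_Barrier(comm)
        return result
    return call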
def write_RFtable(filename, harmonic_factors, time, E_kin, RF_voltage, RF_phase):
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    if not rank:
        n_lines = len(time)
        n_harmonics = len(harmonic_factors)
        arr = np.vstack((time,E_kin, np.dstack((RF_voltage,RF_phase)).flatten().reshape(n_lines, 2*n_harmonics).T)).T    
        with open(filename, 'w') as fid:
            fid.write('%d  1  1  0  %d\n'%(n_lines, n_harmonics))
            fid.write('  '.join(map(lambda i: '%d'%i, harmonic_factors))+'\n')
            for j in xrange(n_lines):
                fid.write('\t'.join(map(lambda i: '%1.8f'%i, arr[j, :]))+'\n')
    orbit_mpi.MPI_Barrier(comm)
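# Hedged usage sketch for write_RFtable: the arrays below are illustrative
# placeholders.  The generated file starts with '<n_lines>  1  1  0  <n_harmonics>',
# then one line with the harmonic numbers, then one row per time step holding
# time, E_kin and a (voltage, phase) pair for every harmonic.
def _sketch_write_RFtable():
    t = np.array([0.0, 1.0e-3, 2.0e-3])   # time points (placeholder values)
    E_kin = np.array([1.4, 1.4, 1.4])     # kinetic energy column (placeholder values)
    volts = np.array([0.02, 0.02, 0.02])  # single harmonic, so 1-D arrays suffice
    phases = np.zeros(3)
    write_RFtable('RF_table.dat', [1], t, E_kin, volts, phases)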
def write_QuadRamp_files(target_file, twissfile, pattern, ptc_source_table):
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    if not rank:
        t = metaclass.twiss(twissfile)
        q_i = [i for i, n in enumerate(t.NAME) if pattern in n]
        with open(target_file, 'w') as fid:
            fid.write('SET ORBIT RAMPING \n')
            for i in q_i:
                fid.write(' ramp\n %s\t"%s"\t%1.9f \n' %
                          (t.NAME[i], ptc_source_table, (t.K1L[i] / t.L[i]) /
                           (t.K1L[q_i[0]] / t.L[q_i[0]])))
            fid.write('return')
    orbit_mpi.MPI_Barrier(comm)
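# Hedged usage sketch for write_QuadRamp_files: every element whose MADX name
# contains `pattern` is ramped from the PTC source table, scaled by its K1L/L
# relative to the first matched element (see the loop above).  All file names
# and the pattern below are illustrative only.
def _sketch_write_QuadRamp_files():
    write_QuadRamp_files('quad_ramp.ptc', 'twiss.tfs', 'QDN', 'QD_table')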
def write_PTCtable(filename, multipole_orders, time, normal_components, skew_components):
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    factor = 1./np.math.factorial(multipole_orders-1) # the factorial factor is needed to be consistent with MADX
    if not rank:
        n_lines = len(time)
        n_multipoles = 1 # number of multipole orders to be changed (for the moment only 1 is implemented)
        arr = np.vstack((time,normal_components*factor,skew_components*factor)).T    
        with open(filename, 'w') as fid:
            fid.write('%d  1  %d\n'%(n_lines, n_multipoles))
            fid.write('  %d\n'%multipole_orders)
            for j in xrange(n_lines):
                fid.write('\t'.join(map(lambda i: '%1.11f'%i, arr[j, :]))+'\n')
    orbit_mpi.MPI_Barrier(comm)
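# Hedged usage sketch for write_PTCtable: only a single multipole order is
# supported (see n_multipoles above).  The stored strengths are divided by
# (order-1)! to match the MADX convention, as noted in the factorial comment.
# Array values and the file name are illustrative placeholders.
def _sketch_write_PTCtable():
    time = np.linspace(0., 1.0e-3, 3)
    k2l = np.array([0.0, 0.05, 0.1])  # normal sextupole component ramp (placeholder)
    write_PTCtable('ptc_sextupole_table.dat', 3, time, k2l, np.zeros(3))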
    
def generate_initial_distribution(parameters, Lattice,output_file = 'Input/ParticleDistribution.in', summary_file = 'Input/ParticleDistribution_summary.txt', outputFormat='Orbit'):
	parameters['alphax0'] = Lattice.alphax0
	parameters['betax0']  = Lattice.betax0
	parameters['alphay0'] = Lattice.alphay0
	parameters['betay0']  = Lattice.betay0
	parameters['etax0']   = Lattice.etax0
	parameters['etapx0']  = Lattice.etapx0
	parameters['etay0']   = Lattice.etay0
	parameters['etapy0']  = Lattice.etapy0
	parameters['x0']      = Lattice.orbitx0
	parameters['xp0']     = Lattice.orbitpx0
	parameters['y0']      = Lattice.orbity0
	parameters['yp0']     = Lattice.orbitpy0
	parameters['gamma_transition'] = Lattice.gammaT
	parameters['circumference']    = Lattice.getLength()
	parameters['length'] = Lattice.getLength()/Lattice.nHarm
	# twiss containers
	twissX = TwissContainer(alpha = parameters['alphax0'], beta = parameters['betax0'], emittance = parameters['epsn_x'] / parameters['gamma'] / parameters['beta'])
	twissY = TwissContainer(alpha = parameters['alphay0'], beta = parameters['betay0'], emittance = parameters['epsn_y'] / parameters['gamma'] / parameters['beta'])
	dispersionx = {'etax0': parameters['etax0'], 'etapx0': parameters['etapx0']}
	dispersiony = {'etay0': parameters['etay0'], 'etapy0': parameters['etapy0']}
	# ~ dispersionx = {'etax0': parameters['etax0'], 'etapx0': parameters['etapx0']}
	# ~ dispersiony = {'etay0': parameters['etay0'], 'etapy0': parameters['etapy0']}
	closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']} 
	closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']} 

	# initialize particle arrays
	x = np.zeros(parameters['n_macroparticles'])
	xp = np.zeros(parameters['n_macroparticles'])
	y = np.zeros(parameters['n_macroparticles'])
	yp = np.zeros(parameters['n_macroparticles'])
	phi = np.zeros(parameters['n_macroparticles'])
	dE = np.zeros(parameters['n_macroparticles'])

	# building the distributions
	Transverse_distribution = GaussDist2D(twissX, twissY, cut_off=parameters['TransverseCut'])
	Longitudinal_distribution = LongitudinalJohoDistributionSingleHarmonic(parameters, parameters['LongitudinalJohoParameter'])

	# only the main CPU is actually writing its distribution to a file ...
	comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
	if orbit_mpi.MPI_Comm_rank(comm) == 0:
		with open(output_file,"w") as fid:
			csv_writer = csv.writer(fid, delimiter=' ')


			for i in range(parameters['n_macroparticles']):
				(phi[i], dE[i]) = Longitudinal_distribution.getCoordinates()
				(x[i], xp[i], y[i], yp[i]) = Transverse_distribution.getCoordinates()
				x[i] += closedOrbitx['x0']
				xp[i] += closedOrbitx['xp0']
				y[i] += closedOrbity['y0']
				yp[i] += closedOrbity['yp0']
				dpp = dE[i] / (parameters['energy']) / parameters['beta']**2	# dE here is in eV
				x[i] += dpp * dispersionx['etax0']
				xp[i] += dpp * dispersionx['etapx0']	
				y[i] += dpp * dispersiony['etay0']
				yp[i] += dpp * dispersiony['etapy0']	
				
				if outputFormat == 'Orbit':
					x[i] *= 1000.
					xp[i] *= 1000.
					y[i] *= 1000.
					yp[i] *= 1000.
					dE[i] /= 1.e9		# Convert dE from eV to GeV
					csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]])
				#csv_writer.writerow([x[i], xp[i], y[i], yp[i], z[i], dE[i]])
		if summary_file:
			with open(summary_file, 'w') as fid:
				map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
		print '\nCreated particle distribution with ' + str(parameters['n_macroparticles']) + ' macroparticles into file: ', output_file

	orbit_mpi.MPI_Barrier(comm)

	return output_file
def generate_initial_distribution_from_tomo(parameters, matfile=0, Lattice=None, output_file='ParticleDistribution.in', outputFormat='pyOrbit', summary_file='ParticleDistribution_summary.txt', summary_mat_file=None):
	
	# Get parameters from the lattice
	parameters['alphax0'] = Lattice.alphax0
	parameters['betax0']  = Lattice.betax0
	parameters['alphay0'] = Lattice.alphay0
	parameters['betay0']  = Lattice.betay0
	parameters['etax0']   = Lattice.etax0
	parameters['etapx0']  = Lattice.etapx0
	parameters['etay0']   = Lattice.etay0
	parameters['etapy0']  = Lattice.etapy0
	parameters['x0']      = Lattice.orbitx0
	parameters['xp0']     = Lattice.orbitpx0
	parameters['y0']      = Lattice.orbity0
	parameters['yp0']     = Lattice.orbitpy0
	parameters['gamma_transition'] = Lattice.gammaT
	parameters['circumference']    = Lattice.getLength()
	parameters['length'] = Lattice.getLength()/Lattice.nHarm
	
	# Create Twiss containers
	twissX = TwissContainer(alpha = parameters['alphax0'], beta = parameters['betax0'], emittance = parameters['epsn_x'] / parameters['gamma'] / parameters['beta'])
	twissY = TwissContainer(alpha = parameters['alphay0'], beta = parameters['betay0'], emittance = parameters['epsn_y'] / parameters['gamma'] / parameters['beta'])
	dispersionx = {'etax0': parameters['etax0'], 'etapx0': parameters['etapx0']}
	dispersiony = {'etay0': parameters['etay0'], 'etapy0': parameters['etapy0']}
	closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']} 
	closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']} 

	# Initialize empty particle arrays
	x = np.zeros(parameters['n_macroparticles'])
	xp = np.zeros(parameters['n_macroparticles'])
	y = np.zeros(parameters['n_macroparticles'])
	yp = np.zeros(parameters['n_macroparticles'])
	z = np.zeros(parameters['n_macroparticles'])
	phi = np.zeros(parameters['n_macroparticles'])
	dE = np.zeros(parameters['n_macroparticles'])


	# Instantiate the classes for the longitudinal and transverse distributions
	Transverse_distribution = GaussDist2D(twissX, twissY, cut_off=parameters['TransverseCut'])
	Longitudinal_distribution = LongitudinalDistributionFromTomoscope(parameters['tomo_file'], matfile)

	try: 
		noise_level = parameters['noise_level']
	except KeyError:
		noise_level = 0	
		
	t_rand, dE_rand = Longitudinal_distribution.getCoordinates(parameters['n_macroparticles'], noise_level) 
	z = (t_rand * 1e-9) * speed_of_light * parameters['beta'] # convert ns to s and then m
	dE = dE_rand * 1e-3 # convert from MeV to GeV
	
	# We need to convert z into phi
	h_main = np.atleast_1d(parameters['harmonic_number'])[0]
	R = parameters['circumference'] / 2 / np.pi
	phi = - z * h_main / R

	# Write the distn to a file only on one CPU
	comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
	if orbit_mpi.MPI_Comm_rank(comm) == 0:
		
		with open(output_file,"w") as fid:
			
			csv_writer = csv.writer(fid, delimiter=' ')
			for i in range(parameters['n_macroparticles']):
				
				# ~ (z[i], dE[i]) = Longitudinal_distribution.getCoordinates()
				# ~ z[i] = z[i] * speed_of_light * parameters['beta'] * 1e-9 # convert ns to s and then m
				# ~ dE[i] = dE[i] * 1e-3 # convert from MeV to GeV
				(x[i], xp[i], y[i], yp[i]) = Transverse_distribution.getCoordinates()
				x[i] += closedOrbitx['x0']
				xp[i] += closedOrbitx['xp0']
				y[i] += closedOrbity['y0']
				yp[i] += closedOrbity['yp0']
				dpp = dE[i] / (parameters['energy']) / parameters['beta']**2 * 1E9 # dE is already in GeV - convert to eV
				print '\n dpp = ', dpp
				x[i] += dpp * dispersionx['etax0']
				xp[i] += dpp * dispersionx['etapx0']	
				y[i] += dpp * dispersiony['etay0']
				yp[i] += dpp * dispersiony['etapy0']	
				
				# ~ if outputFormat == 'Orbit':
				x[i] *= 1000.
				xp[i] *= 1000.
				y[i] *= 1000.
				yp[i] *= 1000.
				# ~ dE[i] /= 1.e9	# dE already converted to GeV
						
			# ~ if outputFormat == 'Orbit':
			map(lambda i: csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]]), range(parameters['n_macroparticles']))	
			# ~ elif outputFormat == 'pyOrbit':
				# ~ map(lambda i: csv_writer.writerow([x[i], xp[i], y[i], yp[i], z[i], dE[i]]), range(parameters['n_macroparticles']))	
				
		if summary_file:
			with open(summary_file, 'w') as fid:
				map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
				
		print '\nCreated particle distribution with ' + str(parameters['n_macroparticles']) + ' macroparticles into file: ', output_file

	orbit_mpi.MPI_Barrier(comm)

	return output_file
def generate_initial_distribution(
        parameters,
        Lattice=None,
        output_file='ParticleDistribution.in',
        outputFormat='pyOrbit',
        summary_file='ParticleDistribution_summary.txt',
        summary_mat_file=None):
    assert outputFormat in ['Orbit', 'pyOrbit']
    p = parameters
    beta = p['beta']
    gamma = p['gamma']
    if Lattice:
        p['alphax0'] = Lattice.alphax0
        p['betax0'] = Lattice.betax0
        p['alphay0'] = Lattice.alphay0
        p['betay0'] = Lattice.betay0
        p['etax0'] = Lattice.etax0
        p['etapx0'] = Lattice.etapx0
        p['etay0'] = Lattice.etay0
        p['etapy0'] = Lattice.etapy0
        p['x0'] = Lattice.orbitx0
        p['xp0'] = Lattice.orbitpx0
        p['y0'] = Lattice.orbity0
        p['yp0'] = Lattice.orbitpy0
        p['gamma_transition'] = Lattice.gammaT
        p['circumference'] = Lattice.getLength()

    # building the distributions
    eta = 1 / p['gamma_transition']**2 - 1 / p['gamma']**2
    R = p['circumference'] / 2 / np.pi
    beta = p['beta']
    energy = p['energy']
    phi_rf = p['phi_s']
    h = p['harmonic_number']
    h_main = np.atleast_1d(p['harmonic_number'])[0]
    rf_voltage = p['rf_voltage']
    RF = DoubleRF(R, eta, beta, energy, phi_rf, h, rf_voltage)
    Longitudinal_distribution = LongitudinalBinomialDistribution(
        RF, p['LongitudinalDistribution_z_max'],
        p['LongitudinalJohoParameter'])
    z, dpp = Longitudinal_distribution.getCoordinates(p['n_macroparticles'])

    z_arr, z_profile, z_rms, dp, dp_profile, dpp_rms = \
        Longitudinal_distribution.getBunchProfile()
    p['dpp_sigma'] = _GaussianFit(dp, dp_profile)[0][2]
    p['dpp_sigma_from_FWHM'] = _Gaussian_sigma_from_FWHM(dp, dp_profile)
    p['dpp_profile'] = np.array([dp, dp_profile])
    p['dpp_rms'] = dpp_rms
    p['linedensity_profile'] = np.array([z_arr, z_profile])
    phi = -z * h_main / R
    dE = dpp * p['energy'] * beta**2 * 1.e-9

    # transverse coordinates
    x, xp, y, yp = [], [], [], []
    for epsn_x, epsn_y, intensity in zip(np.atleast_1d(p['epsn_x']),
                                         np.atleast_1d(p['epsn_y']),
                                         np.atleast_1d(p['intensity'])):
        # twiss containers
        twissX = TwissContainer(alpha=p['alphax0'],
                                beta=p['betax0'],
                                emittance=epsn_x / gamma / beta)
        twissY = TwissContainer(alpha=p['alphay0'],
                                beta=p['betay0'],
                                emittance=epsn_y / gamma / beta)

        Transverse_distribution = GaussDist2D(twissX,
                                              twissY,
                                              cut_off=p['TransverseCut'])
        n_macroparticles_tmp = int(p['n_macroparticles'] *
                                   (intensity / np.sum(p['intensity'])))
        Transverse_coords = np.array(
            map(lambda i: Transverse_distribution.getCoordinates(),
                xrange(n_macroparticles_tmp)))
        x.extend(Transverse_coords[:, 0].tolist())
        xp.extend(Transverse_coords[:, 1].tolist())
        y.extend(Transverse_coords[:, 2].tolist())
        yp.extend(Transverse_coords[:, 3].tolist())
    # top up in case x does not yet contain n_macroparticles entries
    while len(x) < p['n_macroparticles']:
        Transverse_coords = Transverse_distribution.getCoordinates()
        x.append(Transverse_coords[0])
        xp.append(Transverse_coords[1])
        y.append(Transverse_coords[2])
        yp.append(Transverse_coords[3])
    x = np.array(x) + p['x0'] + dpp * p['etax0']
    xp = np.array(xp) + p['xp0'] + dpp * p['etapx0']
    y = np.array(y) + p['y0'] + dpp * p['etay0']
    yp = np.array(yp) + p['yp0'] + dpp * p['etapy0']

    # only the main CPU is actually writing its distribution to a file ...
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if orbit_mpi.MPI_Comm_rank(comm) == 0:
        with open(output_file, "w") as fid:
            csv_writer = csv.writer(fid, delimiter=' ')
            if outputFormat == 'Orbit':
                x *= 1000.
                xp *= 1000.
                y *= 1000.
                yp *= 1000.
                map(
                    lambda i: csv_writer.writerow(
                        [x[i], xp[i], y[i], yp[i], phi[i], dE[i]]),
                    range(p['n_macroparticles']))
            elif outputFormat == 'pyOrbit':
                map(
                    lambda i: csv_writer.writerow(
                        [x[i], xp[i], y[i], yp[i], z[i], dE[i]]),
                    range(p['n_macroparticles']))

        if summary_file:
            with open(summary_file, 'w') as fid:
                map(lambda key: fid.write(key + ' = ' + str(p[key]) + '\n'), p)

        if summary_mat_file:
            with open(summary_mat_file, 'w') as fid:
                sio.savemat(fid, parameters)

        print '\nCreated particle distribution with ' + str(
            p['n_macroparticles']) + ' macroparticles into file: ', output_file

    orbit_mpi.MPI_Barrier(comm)

    return output_file
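# Hedged sketch of the `parameters` dictionary consumed by the
# generate_initial_distribution variant above.  Only the key names are taken
# from the function body; all numerical values are placeholders, and the energy
# is assumed to be in eV, as implied by the 1.e-9 conversion of dE to GeV.
# When no Lattice object is passed, the optics keys (alphax0, betax0, alphay0,
# betay0, etax0, etapx0, etay0, etapy0, x0, xp0, y0, yp0, gamma_transition,
# circumference) must be supplied in the dictionary as well.
_example_parameters = {
    'n_macroparticles': 1000,
    'gamma': 2.49, 'beta': 0.916, 'energy': 2.33e9,
    'epsn_x': 2.0e-6, 'epsn_y': 2.0e-6, 'intensity': 1.0e13,
    'TransverseCut': 5.,
    'harmonic_number': [1], 'rf_voltage': 25.0e3, 'phi_s': 0.,
    'LongitudinalJohoParameter': 1.2,
    'LongitudinalDistribution_z_max': 10.,
}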
def generate_initial_poincare_distribution(n_sigma, parameters, Lattice, horizontal = 1,  output_file = 'Input/ParticleDistribution.in', summary_file = 'Input/ParticleDistribution_summary.txt', outputFormat='Orbit'):
	parameters['alphax0'] = Lattice.alphax0
	parameters['betax0']  = Lattice.betax0
	parameters['alphay0'] = Lattice.alphay0
	parameters['betay0']  = Lattice.betay0
	parameters['etax0']   = Lattice.etax0
	parameters['etapx0']  = Lattice.etapx0
	parameters['etay0']   = Lattice.etay0
	parameters['etapy0']  = Lattice.etapy0
	parameters['x0']      = Lattice.orbitx0
	parameters['xp0']     = Lattice.orbitpx0
	parameters['y0']      = Lattice.orbity0
	parameters['yp0']     = Lattice.orbitpy0
	parameters['gamma_transition'] = Lattice.gammaT
	parameters['circumference']    = Lattice.getLength()
	parameters['length'] = Lattice.getLength()/Lattice.nHarm
	# twiss containers
	twissX = TwissContainer(alpha = parameters['alphax0'], beta = parameters['betax0'], emittance = parameters['epsn_x'] / parameters['gamma'] / parameters['beta'])
	twissY = TwissContainer(alpha = parameters['alphay0'], beta = parameters['betay0'], emittance = parameters['epsn_y'] / parameters['gamma'] / parameters['beta'])
	dispersionx = {'etax0': parameters['beta']*parameters['etax0'], 'etapx0': parameters['beta']*parameters['etapx0']}
	dispersiony = {'etay0': parameters['beta']*parameters['etay0'], 'etapy0': parameters['beta']*parameters['etapy0']}
	closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']} 
	closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']} 

	# initialize particle arrays
	x = np.zeros(parameters['n_macroparticles'])
	xp = np.zeros(parameters['n_macroparticles'])
	y = np.zeros(parameters['n_macroparticles'])
	yp = np.zeros(parameters['n_macroparticles'])
	phi = np.zeros(parameters['n_macroparticles'])
	dE = np.zeros(parameters['n_macroparticles'])

	# building the distributions
	# ~ Transverse_distribution = GaussDist2D(twissX, twissY, cut_off=parameters['TransverseCut'])
	Transverse_distribution = KVDist1D(twissX)
	Longitudinal_distribution = LongitudinalJohoDistributionSingleHarmonic(parameters, parameters['LongitudinalJohoParameter'])

	# only the main CPU is actually writing its distribution to a file ...
	comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
	if orbit_mpi.MPI_Comm_rank(comm) == 0:
		with open(output_file,"w") as fid:
			csv_writer = csv.writer(fid, delimiter=' ')


			for i in range(parameters['n_macroparticles']):
				# RANDOM UNIFORM
				# ~ x[i] = random.uniform(0., n_sigma) * np.sqrt(parameters['betax0'] * parameters['epsn_x'])
				# EQUAL STEPS
				if horizontal:
					# ~ print 'beta = ',parameters['beta'], ' gamma = ', parameters['gamma']
					x[i] = i * float(n_sigma/float(parameters['n_macroparticles'])) * np.sqrt(float(parameters['betax0']) * ( parameters['epsn_x'] / (parameters['beta'] * parameters['gamma'])))
				elif not horizontal:
					# ~ print '\nVERTICAL BUNCH: n_sigma = ',n_sigma, ', sigma = ',  (np.sqrt(parameters['betay0'] * parameters['epsn_y']))
					# ~ print '\ty =', i * (n_sigma/parameters['n_macroparticles']) * np.sqrt(parameters['betay0'] * parameters['epsn_y'])
					# ~ print '\ti = ', i, ', betay0 = ',  parameters['betay0'], ', epsn_y = ', parameters['epsn_y'], ', macroparticles = ',  parameters['n_macroparticles']
					# ~ print '\tsqrt(bet*eps) = ', np.sqrt(parameters['betay0'] * parameters['epsn_y'])
					# ~ print '\tn_sigma/macro = ', float(n_sigma/float(parameters['n_macroparticles']))
					y[i] = i * float(n_sigma/float(parameters['n_macroparticles'])) * np.sqrt(float(parameters['betay0']) * ( parameters['epsn_y'] / (parameters['beta'] * parameters['gamma'])))

				if outputFormat == 'Orbit':
					x[i] *= 1000.
					xp[i] *= 1000.
					y[i] *= 1000.
					yp[i] *= 1000.
					dE[i] /= 1.e9
					csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]])
				#csv_writer.writerow([x[i], xp[i], y[i], yp[i], z[i], dE[i]])
		if summary_file:
			with open(summary_file, 'w') as fid:
				map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
		print '\nCreated particle distribution with ' + str(parameters['n_macroparticles']) + ' macroparticles into file: ', output_file


	orbit_mpi.MPI_Barrier(comm)

	return output_file
def saveBunchAsMatfile(bunch, filename=None):

    b = bunch
    #take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array with one entry per CPU,
    # holding the number of macroparticles on that CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[]
                                for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(
                lambda i: b.partAttrValue(attribute, i, j), mp_array)

    # This is just in case; strictly speaking, the MPI_Barrier call is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                #get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(
                        np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu,
                                               222, comm)))
            elif (rank == i_cpu):
                #send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE,
                                       main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    #get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(
                            np.atleast_1d(
                                orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE,
                                                   i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    #send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j],
                                           mpi_datatype.MPI_DOUBLE, main_rank,
                                           222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(), \
           'charge': bunch.charge(),
           'mass': bunch.mass(), \
           'momentum': bunch.getSyncParticle().momentum(), \
           'beta': bunch.getSyncParticle().beta(), \
           'gamma': bunch.getSyncParticle().gamma(), \
           'time': bunch.getSyncParticle().time()}

    if filename:
        if rank == main_rank:
            sio.savemat(filename + '.mat', {
                'particles': particles,
                'bunchparameters': bunchparameters
            },
                        do_compression=True)
    orbit_mpi.MPI_Barrier(comm)
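# Hedged sketch: reading back the structures that saveBunchAsMatfile writes.
# The field names follow the dictionaries assembled above; the file name is
# illustrative only.
def _sketch_inspect_bunch_matfile(matfile='bunch_dump.mat'):
    d = sio.loadmat(matfile, squeeze_me=True)
    print d['bunchparameters']['momentum'], d['bunchparameters']['gamma']
    print np.atleast_1d(d['particles']['x'][()])[:5]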
# Fragment: a method of an output-collecting class (note `self.output_dict`);
# only rank 0 writes the accumulated dictionary to a .mat file.
 def save_to_matfile(self, filename):
     rank = orbit_mpi.MPI_Comm_rank(orbit_mpi.mpi_comm.MPI_COMM_WORLD)
     if not rank:
         sio.savemat(filename, self.output_dict)
     orbit_mpi.MPI_Barrier(orbit_mpi.mpi_comm.MPI_COMM_WORLD)
def BunchGather(bunch, turn, p, plot_footprint=False):

    b = bunch
    verbose = False

    # take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array with one entry per CPU,
    # holding the number of macroparticles on that CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    if verbose:
        print 'BunchGather:: bunch size on MPI rank ', rank, ' = ', n_parts_arr[rank]
        print 'BunchGather:: n_parts_arr on MPI rank ', rank, ' = ', n_parts_arr

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[]
                                for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(
                lambda i: b.partAttrValue(attribute, i, j), mp_array)

    # This is just in case; strictly speaking, the MPI_Barrier call is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                # get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(
                        np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu,
                                               222, comm)))
            elif (rank == i_cpu):
                # send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE,
                                       main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT,
                                                       i_cpu, 222, comm)
                if bunch_size_remote:
                    # get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(
                            np.atleast_1d(
                                orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE,
                                                   i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT,
                                   main_rank, 222, comm)
                if bunch_size_local:
                    # send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j],
                                           mpi_datatype.MPI_DOUBLE, main_rank,
                                           222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(), \
           'charge': bunch.charge(),
           'mass': bunch.mass(), \
           'momentum': bunch.getSyncParticle().momentum(), \
           'beta': bunch.getSyncParticle().beta(), \
           'gamma': bunch.getSyncParticle().gamma(), \
           'time': bunch.getSyncParticle().time()}

    ########################################################################
    #                Plot tune footprint with histograms                   #
    ########################################################################

    if rank == main_rank:
        # ~ print 'Rank: ', rank
        if turn >= 0:
            # ~ print 'Turn: ', turn
            if plot_footprint:
                if verbose:
                    print 'BunchGather:: Plot tune footprint on rank', rank

                tunex = str(p['tunex'][0] + '.' + p['tunex'][1:])
                tuney = str(p['tuney'][0] + '.' + p['tuney'][1:])
                tunex_sav = str(p['tunex'][0] + 'p' + p['tunex'][1:])
                tuney_sav = str(p['tuney'][0] + 'p' + p['tuney'][1:])
                fontsize = 15

                qx = np.array(particles['ParticlePhaseAttributes'][2])
                qy = np.array(particles['ParticlePhaseAttributes'][3])

                qx[np.where(qx > 0.5)] -= 1
                qy[np.where((qy > 0.6) & (qx < 0.25))] -= 1

                print 'resonances'
                resonances = resonance_lines((5.75, 6.25), (5.75, 6.25),
                                             (1, 2, 3, 4), 10)
                fontsize = 17

                f, ax = plt.subplots(1, figsize=(6, 6))
                gridspec.GridSpec(3, 3)
                #f.subplots_adjust(hspace = 0)	# hspace: vertical spacing between subplot rows
                f.subplots_adjust(
                    wspace=0)  # wspace: horizontal spacing between subplot columns

                my_cmap = plt.cm.jet
                my_cmap.set_under('w', 1)

                r = resonances

                print 'title'
                title = str(tunex_sav + ' ' + tuney_sav + ' turn ' + str(turn))

                # First subplot
                print 'plot1'
                plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=1)
                plt.hist(6 + qx, bins=1000,
                         range=(r.Qx_min,
                                r.Qx_max))  #, norm=mcolors.PowerNorm(gamma))
                plt.ylabel('Frequency')
                plt.grid(which='both')
                plt.title(title, fontsize=fontsize)

                # Main plot
                print 'plot2'
                plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
                plt.hist2d(6 + qx,
                           6 + qy,
                           bins=1000,
                           cmap=my_cmap,
                           vmin=1,
                           range=[[r.Qx_min, r.Qx_max], [r.Qy_min, r.Qy_max]
                                  ])  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel(r'Q$_x$')
                plt.ylabel(r'Q$_y$')

                print 'plot_resonance'
                resonances.plot_resonance(f)

                # Second subplot
                print 'plot3'
                plt.subplot2grid((3, 3), (1, 2), colspan=1, rowspan=2)
                plt.hist(6 + qy,
                         bins=1000,
                         range=(r.Qy_min, r.Qy_max),
                         orientation=u'horizontal'
                         )  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel('Frequency')
                plt.grid(which='both')

                current_axis = plt.gca()
                #current_axis.axes.get_yaxis().set_visible(False)

                ax.xaxis.label.set_size(fontsize)
                ax.yaxis.label.set_size(fontsize)
                ax.tick_params(labelsize=fontsize)

                plt.tight_layout()
                savename = str('Tune_Footprints/' + tunex_sav + '_' +
                               tuney_sav + '_turn_' + str(turn) + '_hist.png')

                print 'savefig'
                f.savefig(savename, dpi=100)
                plt.close(f)

    outputs = dict()
    if rank == main_rank:
        x = np.array(particles['x'])
        xp = np.array(particles['xp'])
        y = np.array(particles['y'])
        yp = np.array(particles['yp'])
        z = np.array(particles['z'])
        dE = np.array(particles['dE'])

        mu_x = moment(x, 2)
        mu_xp = moment(xp, 2)
        mu_y = moment(y, 2)
        mu_yp = moment(yp, 2)
        mu_z = moment(z, 2)
        mu_dE = moment(dE, 2)

        sig_x = np.sqrt(mu_x)
        sig_xp = np.sqrt(mu_xp)
        sig_y = np.sqrt(mu_y)
        sig_yp = np.sqrt(mu_yp)
        sig_z = np.sqrt(mu_z)
        sig_dE = np.sqrt(mu_dE)

        x_6_sig = x[np.where((x >= -6 * sig_x) & (x <= 6 * sig_x))]
        xp_6_sig = xp[np.where((xp >= -6 * sig_xp) & (xp <= 6 * sig_xp))]
        y_6_sig = y[np.where((y >= -6 * sig_y) & (y <= 6 * sig_y))]
        yp_6_sig = yp[np.where((yp >= -6 * sig_yp) & (yp <= 6 * sig_yp))]
        z_6_sig = z[np.where((z >= -6 * sig_z) & (z <= 6 * sig_z))]
        dE_6_sig = dE[np.where((dE >= -6 * sig_dE) & (dE <= 6 * sig_dE))]

        # Later add something to cut large amplitude particles to reduce noise for kurtosis calculation
        outputs = {
            'Mu_x': mu_x,
            'Mu_xp': mu_xp,
            'Mu_y': mu_y,
            'Mu_yp': mu_yp,
            'Mu_z': mu_z,
            'Mu_dE': mu_dE,
            'Sig_x': sig_x,
            'Sig_xp': sig_xp,
            'Sig_y': sig_y,
            'Sig_yp': sig_yp,
            'Sig_z': sig_z,
            'Sig_dE': sig_dE,
            'Max_x': np.max(x),
            'Max_xp': np.max(xp),
            'Max_y': np.max(y),
            'Max_yp': np.max(yp),
            'Max_z': np.max(z),
            'Max_dE': np.max(dE),
            'Min_x': np.min(x),
            'Min_xp': np.min(xp),
            'Min_y': np.min(y),
            'Min_yp': np.min(yp),
            'Min_z': np.min(z),
            'Min_dE': np.min(dE),
            'Kurtosis_x': kurtosis(x, fisher=True, nan_policy='omit'),
            'Kurtosis_xp': kurtosis(xp, fisher=True, nan_policy='omit'),
            'Kurtosis_y': kurtosis(y, fisher=True, nan_policy='omit'),
            'Kurtosis_yp': kurtosis(yp, fisher=True, nan_policy='omit'),
            'Kurtosis_z': kurtosis(z, fisher=True, nan_policy='omit'),
            'Kurtosis_dE': kurtosis(dE, fisher=True, nan_policy='omit'),
            'Kurtosis_x_6sig': kurtosis(x_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_xp_6sig': kurtosis(xp_6_sig,
                                         fisher=True,
                                         nan_policy='omit'),
            'Kurtosis_y_6sig': kurtosis(y_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_yp_6sig': kurtosis(yp_6_sig,
                                         fisher=True,
                                         nan_policy='omit'),
            'Kurtosis_z_6sig': kurtosis(z_6_sig,
                                        fisher=True,
                                        nan_policy='omit'),
            'Kurtosis_dE_6sig': kurtosis(dE_6_sig,
                                         fisher=True,
                                         nan_policy='omit')
        }

    return outputs
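# Hedged usage sketch for BunchGather: accumulate the rank-0 moment dictionary
# turn by turn and dump it with scipy.io.  The tracking call is a placeholder
# (commented out) and all names are illustrative only.
def _sketch_track_and_gather(bunch, params, n_turns=10):
    history = {}
    for turn in range(n_turns):
        # track_one_turn(bunch)  # placeholder for the actual lattice tracking step
        outputs = BunchGather(bunch, turn, params, plot_footprint=False)
        for key, value in outputs.items():   # empty dict on non-main ranks
            history.setdefault(key, []).append(value)
    if not orbit_mpi.MPI_Comm_rank(orbit_mpi.mpi_comm.MPI_COMM_WORLD):
        sio.savemat('bunch_moments.mat', history)
    return history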
def generate_initial_distribution_tomo_old(
        parameters,
        matfile=0,
        Lattice=None,
        output_file='ParticleDistribution.in',
        outputFormat='pyOrbit',
        summary_file='ParticleDistribution_summary.txt',
        summary_mat_file=None):
    assert outputFormat in ['Orbit', 'pyOrbit']
    p = parameters
    beta = p['beta']
    gamma = p['gamma']
    if Lattice:
        p['alphax0'] = Lattice.alphax0
        p['betax0'] = Lattice.betax0
        p['alphay0'] = Lattice.alphay0
        p['betay0'] = Lattice.betay0
        p['etax0'] = Lattice.etax0
        p['etapx0'] = Lattice.etapx0
        p['etay0'] = Lattice.etay0
        p['etapy0'] = Lattice.etapy0
        p['x0'] = Lattice.orbitx0
        p['xp0'] = Lattice.orbitpx0
        p['y0'] = Lattice.orbity0
        p['yp0'] = Lattice.orbitpy0
        p['gamma_transition'] = Lattice.gammaT
        p['circumference'] = Lattice.getLength()

    # building the distributions
    # eta = 1/p['gamma_transition']**2 - 1/p['gamma']**2
    # R = p['circumference']/2/np.pi
    # beta = p['beta']
    # energy = p['energy']
    # phi_rf = p['phi_s']
    # h = p['harmonic_number']
    # h_main = np.atleast_1d(p['harmonic_number'])[0]
    # rf_voltage = p['rf_voltage']
    # RF = DoubleRF(R, eta, beta, energy, phi_rf, h, rf_voltage)
    # Longitudinal_distribution = LongitudinalBinomialDistribution(RF, p['LongitudinalDistribution_z_max'], p['LongitudinalJohoParameter'])
    # z, dpp = Longitudinal_distribution.getCoordinates(p['n_macroparticles'])

    # building the distributions
    beta = p['beta']
    try:
        noise_level = p['noise_level']
    except KeyError:
        noise_level = 0

    # ~ Longitudinal_distribution = LongitudinalDistributionFromTomoscope(p['tomo_file'])
    Longitudinal_distribution = LongitudinalDistributionFromTomoscope(
        p['tomo_file'], matfile)

    # ~ Longitudinal_distribution.plot_Tomoscope_data()
    # ~ Longitudinal_distribution.plot_generated_distribution()

    t_rand, dE_rand = Longitudinal_distribution.getCoordinates(
        p['n_macroparticles'], noise_level)
    z = t_rand * speed_of_light * beta * 1e-9  # convert ns to s and then m
    # ~ z = (t_rand * 1e-9) * speed_of_light * beta * 0.075 # convert ns to s and then m
    dE = dE_rand * 1e-3  # convert from MeV to GeV
    dpp = dE / p['energy'] / 1.e-9 / beta**2
    # ~ dpp = dE / p['energy'] / beta**2  # Not sure which dpp definition is correct

    # phi is needed below when writing in the 'Orbit' output format
    h_main = np.atleast_1d(p['harmonic_number'])[0]
    R = p['circumference'] / 2 / np.pi
    phi = -z * h_main / R

    # z_arr, z_profile, z_rms, dp, dp_profile, dpp_rms = Longitudinal_distribution.getBunchProfile()
    # p['dpp_sigma'] = _GaussianFit(dp, dp_profile)[0][2]
    # p['dpp_sigma_from_FWHM'] = _Gaussian_sigma_from_FWHM(dp, dp_profile)
    # p['dpp_profile'] = np.array([dp, dp_profile])
    # p['dpp_rms'] = dpp_rms
    # p['linedensity_profile'] = np.array([z_arr, z_profile])
    # phi = - z * h_main / R
    # dE = dpp * p['energy'] * beta**2 * 1.e-9

    # transverse coordinates
    x, xp, y, yp = [], [], [], []
    for epsn_x, epsn_y, intensity in zip(np.atleast_1d(p['epsn_x']),
                                         np.atleast_1d(p['epsn_y']),
                                         np.atleast_1d(p['intensity'])):
        # twiss containers
        twissX = TwissContainer(alpha=p['alphax0'],
                                beta=p['betax0'],
                                emittance=epsn_x / gamma / beta)
        twissY = TwissContainer(alpha=p['alphay0'],
                                beta=p['betay0'],
                                emittance=epsn_y / gamma / beta)

        Transverse_distribution = GaussDist2D(twissX,
                                              twissY,
                                              cut_off=p['TransverseCut'])
        n_macroparticles_tmp = int(p['n_macroparticles'] *
                                   (intensity / np.sum(p['intensity'])))
        Transverse_coords = np.array(
            map(lambda i: Transverse_distribution.getCoordinates(),
                xrange(n_macroparticles_tmp)))
        x.extend(Transverse_coords[:, 0].tolist())
        xp.extend(Transverse_coords[:, 1].tolist())
        y.extend(Transverse_coords[:, 2].tolist())
        yp.extend(Transverse_coords[:, 3].tolist())

    # in case x has not yet a length of n_macroparticles
    # ~ while len(x)<p['n_macroparticles']:
    # ~ Transverse_coords = Transverse_distribution.getCoordinates()
    # ~ x.append(Transverse_coords[0])
    # ~ xp.append(Transverse_coords[1])
    # ~ y.append(Transverse_coords[2])
    # ~ yp.append(Transverse_coords[3])

    # Dispersion and closed orbit
    x = np.array(x) + p['x0'] + dpp * p['etax0']
    xp = np.array(xp) + p['xp0'] + dpp * p['etapx0']
    y = np.array(y) + p['y0'] + dpp * p['etay0']
    yp = np.array(yp) + p['yp0'] + dpp * p['etapy0']

    # only the main CPU is actually writing its distribution to a file ...
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if orbit_mpi.MPI_Comm_rank(comm) == 0:
        with open(output_file, "w") as fid:
            csv_writer = csv.writer(fid, delimiter=' ')
            if outputFormat == 'Orbit':
                x *= 1000.
                xp *= 1000.
                y *= 1000.
                yp *= 1000.
                # ~ dE[i] /= 1.e9	# Already in the correct units
                map(
                    lambda i: csv_writer.writerow(
                        [x[i], xp[i], y[i], yp[i], phi[i], dE[i]]),
                    range(p['n_macroparticles']))
            elif outputFormat == 'pyOrbit':
                map(
                    lambda i: csv_writer.writerow(
                        [x[i], xp[i], y[i], yp[i], z[i], dE[i]]),
                    range(p['n_macroparticles']))

        if summary_file:
            with open(summary_file, 'w') as fid:
                map(lambda key: fid.write(key + ' = ' + str(p[key]) + '\n'), p)

        if summary_mat_file:
            with open(summary_mat_file, 'w') as fid:
                sio.savemat(fid, parameters)

        print '\nCreated particle distribution with ' + str(
            p['n_macroparticles']) + ' macroparticles into file: ', output_file

    orbit_mpi.MPI_Barrier(comm)

    return output_file
def bunch_pyorbit_to_orbit_nHarm(ringLength, nHarm, pyOrbitBunch, \
 name_of_orbit_mpi_bunch_file):
    """
	Translates pyORBIT bunch to ORBIT_MPI bunch, incorporating RF
	harmonic number, and dumps it into a file.
	The ring length should be defined in the input (in meters).
	Lines in bunch files:
	ORBIT_MPI: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV].
	pyORBIT:   x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
	"""
    pi2 = 2.0 * math.pi
    zfac = pi2 * nHarm / ringLength
    b = pyOrbitBunch
    # Take the MPI Communicator from bunch: it could be different
    # from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array with one entry per CPU,
    # holding the number of macroparticles on that CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, \
     mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            z = -(math.fmod(b.z(i) * zfac, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(str(x) + " " + str(px) + " " + \
             str(y) + " " + str(py) + " "+ \
             str(z) + " " + str(dE) + "\n")

    # MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        #Again, MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                # Get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(\
                 mpi_datatype.MPI_DOUBLE, \
                 i_cpu, 222, comm)
                file_out.write(str(x) + " " + str(px) + \
                 " " + str(y) + " " + str(py) + \
                 " " + str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * zfac, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, \
                 mpi_datatype.MPI_DOUBLE, \
                 main_rank, 222, comm)

    if (rank == main_rank):
        file_out.close()
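# Hedged worked example of the longitudinal conversion used above: for a ring
# of length L and harmonic number nHarm, a pyORBIT z in metres maps to an
# ORBIT_MPI phase phi = -(2*pi*nHarm/L)*z, wrapped into (-pi, pi).  The default
# arguments are illustrative only.
def _sketch_z_to_phi(z, ringLength=628.3, nHarm=1):
    phi = -math.fmod(z * 2.0 * math.pi * nHarm / ringLength, 2.0 * math.pi)
    if phi > math.pi:
        phi -= 2.0 * math.pi
    if phi < -math.pi:
        phi += 2.0 * math.pi
    return phi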
def generate_initial_5mm_distribution(half_range, parameters, Lattice, horizontal = 1,  output_file = 'Input/ParticleDistribution.in', summary_file = 'Input/ParticleDistribution_summary.txt', outputFormat='Orbit'):
	parameters['alphax0'] = Lattice.alphax0
	parameters['betax0']  = Lattice.betax0
	parameters['alphay0'] = Lattice.alphay0
	parameters['betay0']  = Lattice.betay0
	parameters['etax0']   = Lattice.etax0
	parameters['etapx0']  = Lattice.etapx0
	parameters['etay0']   = Lattice.etay0
	parameters['etapy0']  = Lattice.etapy0
	parameters['x0']      = Lattice.orbitx0
	parameters['xp0']     = Lattice.orbitpx0
	parameters['y0']      = Lattice.orbity0
	parameters['yp0']     = Lattice.orbitpy0
	parameters['gamma_transition'] = Lattice.gammaT
	parameters['circumference']    = Lattice.getLength()
	parameters['length'] = Lattice.getLength()/Lattice.nHarm
	# twiss containers
	twissX = TwissContainer(alpha = parameters['alphax0'], beta = parameters['betax0'], emittance = parameters['epsn_x'] / parameters['gamma'] / parameters['beta'])
	twissY = TwissContainer(alpha = parameters['alphay0'], beta = parameters['betay0'], emittance = parameters['epsn_y'] / parameters['gamma'] / parameters['beta'])
	dispersionx = {'etax0': parameters['beta']*parameters['etax0'], 'etapx0': parameters['beta']*parameters['etapx0']}
	dispersiony = {'etay0': parameters['beta']*parameters['etay0'], 'etapy0': parameters['beta']*parameters['etapy0']}
	closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']} 
	closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']}
        
	# initialize particle arrays
	x = np.zeros(parameters['n_macroparticles'])
	xp = np.zeros(parameters['n_macroparticles'])
	y = np.zeros(parameters['n_macroparticles'])
	yp = np.zeros(parameters['n_macroparticles'])
	phi = np.zeros(parameters['n_macroparticles'])
	dE = np.zeros(parameters['n_macroparticles'])

        Longitudinal_distribution = LongitudinalJohoDistributionSingleHarmonic(parameters, parameters['LongitudinalJohoParameter'])

	# only the main CPU is actually writing its distribution to a file ...
	comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
	if orbit_mpi.MPI_Comm_rank(comm) == 0:
		with open(output_file,"w") as fid:
			csv_writer = csv.writer(fid, delimiter=' ')

			for i in range(parameters['n_macroparticles']):
				# ~ (phi[i], dE[i]) = Longitudinal_distribution.getCoordinates()
				# EQUAL STEPS
				if horizontal:
					x[i] = (5E-3 - half_range) + ( (i * (2*half_range))/float(parameters['n_macroparticles']) )
					# z = (-phi*L)/(2*pi)
					# phi = (-2*pi*z)/L
					phi[i] =  (-2*np.pi*2.5*parameters['blength_rms'])/parameters["length"]

					print x[i], phi[i]

				elif not horizontal:
					y[i] = (5E-3 - half_range) + ( (i * (2*half_range))/float(parameters['n_macroparticles']) )
					print y[i]
				if outputFormat == 'Orbit':
					x[i] *= 1000.
					xp[i] *= 1000.
					y[i] *= 1000.
					yp[i] *= 1000.
					dE[i] /= 1.e9
					csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]])
		if summary_file:
			with open(summary_file, 'w') as fid:
				map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
		print '\nCreated particle distribution with ' + str(parameters['n_macroparticles']) + ' macroparticles into file: ', output_file

	orbit_mpi.MPI_Barrier(comm)

	return output_file
def generate_initial_long_poincare_distribution(
        z_offset,
        parameters,
        Lattice,
        zero_particle=True,
        output_file='Input/ParticleDistribution.in',
        summary_file='Input/ParticleDistribution_summary.txt',
        outputFormat='Orbit'):

    parameters['alphax0'] = Lattice.alphax0
    parameters['betax0'] = Lattice.betax0
    parameters['alphay0'] = Lattice.alphay0
    parameters['betay0'] = Lattice.betay0
    parameters['etax0'] = Lattice.etax0
    parameters['etapx0'] = Lattice.etapx0
    parameters['etay0'] = Lattice.etay0
    parameters['etapy0'] = Lattice.etapy0
    parameters['x0'] = Lattice.orbitx0
    parameters['xp0'] = Lattice.orbitpx0
    parameters['y0'] = Lattice.orbity0
    parameters['yp0'] = Lattice.orbitpy0
    parameters['gamma_transition'] = Lattice.gammaT
    parameters['circumference'] = Lattice.getLength()
    parameters['length'] = Lattice.getLength() / Lattice.nHarm

    # twiss containers
    twissX = TwissContainer(alpha=parameters['alphax0'],
                            beta=parameters['betax0'],
                            emittance=parameters['epsn_x'] /
                            parameters['gamma'] / parameters['beta'])
    twissY = TwissContainer(alpha=parameters['alphay0'],
                            beta=parameters['betay0'],
                            emittance=parameters['epsn_y'] /
                            parameters['gamma'] / parameters['beta'])
    dispersionx = {
        'etax0': parameters['beta'] * parameters['etax0'],
        'etapx0': parameters['beta'] * parameters['etapx0']
    }
    dispersiony = {
        'etay0': parameters['beta'] * parameters['etay0'],
        'etapy0': parameters['beta'] * parameters['etapy0']
    }
    closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']}
    closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']}

    # initialize particle arrays
    x = np.zeros(parameters['n_macroparticles'])
    xp = np.zeros(parameters['n_macroparticles'])
    y = np.zeros(parameters['n_macroparticles'])
    yp = np.zeros(parameters['n_macroparticles'])
    phi = np.zeros(parameters['n_macroparticles'])
    dE = np.zeros(parameters['n_macroparticles'])

    # building the distributions
    # ~ Transverse_distribution = GaussDist2D(twissX, twissY, cut_off=parameters['TransverseCut'])
    # ~ Transverse_distribution = KVDist1D(twissX)
    # ~ Longitudinal_distribution = LongitudinalJohoDistributionSingleHarmonic(parameters, parameters['LongitudinalJohoParameter'])

    # only the main CPU is actually writing its distribution to a file ...
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if orbit_mpi.MPI_Comm_rank(comm) == 0:
        with open(output_file, "w") as fid:
            csv_writer = csv.writer(fid, delimiter=' ')

            h_main = np.atleast_1d(parameters['harmonic_number'])[0]
            R = parameters['circumference'] / 2 / np.pi
            #phi = - z * h_main / R

            for i in range(parameters['n_macroparticles']):
                if zero_particle:
                    if i == 0: phi[i] = -z_offset * h_main / R
                else: phi[i] = i * -z_offset * h_main / R

                if outputFormat == 'Orbit':
                    x[i] *= 1000.
                    xp[i] *= 1000.
                    y[i] *= 1000.
                    yp[i] *= 1000.
                    dE[i] /= 1.e9
                    # ~ csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]])
                csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]])
        # ~ if summary_file:
        # ~ with open(summary_file, 'w') as fid:
        # ~ map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
        print '\nCreated particle distribution with ' + str(
            parameters['n_macroparticles']
        ) + ' macroparticles into file: ', output_file

    orbit_mpi.MPI_Barrier(comm)

    return output_file
def generate_initial_distribution_from_BLonD_manual_Twiss(parameters, TwissDict, Lattice=None, output_file='ParticleDistribution.in', outputFormat='pyOrbit', summary_file='ParticleDistribution_summary.txt', summary_mat_file=None):

	# Get parameters from the TwissDict dictionary
	parameters['alphax0'] = TwissDict['alpha_x']
	parameters['betax0']  = TwissDict['beta_x']
	parameters['alphay0'] = TwissDict['alpha_y']
	parameters['betay0']  = TwissDict['beta_y']
	parameters['etax0']   = TwissDict['D_x']
	parameters['etapx0']  = TwissDict['D_xp']
	parameters['etay0']   = TwissDict['D_y']
	parameters['etapy0']  = TwissDict['D_yp']
	parameters['x0']      = TwissDict['x0']
	parameters['xp0']     = TwissDict['xp0']
	parameters['y0']      = TwissDict['y0']
	parameters['yp0']     = TwissDict['yp0']
	parameters['gamma_transition'] = TwissDict['gamma_transition']
	parameters['circumference']    = TwissDict['circumference']
	parameters['length'] = TwissDict['length']
	
	# Create Twiss containers
	twissX = TwissContainer(alpha = parameters['alphax0'], beta = parameters['betax0'], emittance = parameters['epsn_x'] / parameters['gamma'] / parameters['beta'])
	twissY = TwissContainer(alpha = parameters['alphay0'], beta = parameters['betay0'], emittance = parameters['epsn_y'] / parameters['gamma'] / parameters['beta'])
	dispersionx = {'etax0': parameters['etax0'], 'etapx0': parameters['etapx0']}
	dispersiony = {'etay0': parameters['etay0'], 'etapy0': parameters['etapy0']}
	closedOrbitx = {'x0': parameters['x0'], 'xp0': parameters['xp0']} 
	closedOrbity = {'y0': parameters['y0'], 'yp0': parameters['yp0']} 

	# Initialize empty particle arrays
	x = np.zeros(parameters['n_macroparticles'])
	xp = np.zeros(parameters['n_macroparticles'])
	y = np.zeros(parameters['n_macroparticles'])
	yp = np.zeros(parameters['n_macroparticles'])
	z = np.zeros(parameters['n_macroparticles'])
	phi = np.zeros(parameters['n_macroparticles'])
	dE = np.zeros(parameters['n_macroparticles'])


	# Instantiate the transverse distribution (the longitudinal coordinates are taken from the BLonD file)
	Transverse_distribution = GaussDist2D(twissX, twissY, cut_off=parameters['TransverseCut'])

	# Open the BLonD output file holding the longitudinal coordinates
	BLonD_data = np.load(parameters['BLonD_file'])

	# The old per-particle loop copying BLonD_data['dz'] / BLonD_data['dE']
	# was replaced by the vectorized assignment below; check the array
	# lengths first.
	if len(BLonD_data['dz']) < parameters['n_macroparticles']:
		print 'generate_initial_distribution_from_BLonD::Error: input array length', len(BLonD_data['dz']), 'is smaller than the number of requested particles', parameters['n_macroparticles']
		exit(0)
	if len(BLonD_data['dE']) < parameters['n_macroparticles']:
		print 'generate_initial_distribution_from_BLonD::Error: input array length', len(BLonD_data['dE']), 'is smaller than the number of requested particles', parameters['n_macroparticles']
		exit(0)

	z = BLonD_data['dz']
	dE = BLonD_data['dE'] / 1E9  # eV -> GeV

	# Convert the longitudinal position z into an RF phase phi
	h_main = np.atleast_1d(parameters['harmonic_number'])[0]
	R = parameters['circumference'] / 2 / np.pi
	phi = -z * h_main / R
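	# With R = circumference / (2*pi), this is equivalent to
	# phi = -2*pi * h_main * z / circumference, i.e. the RF phase (in rad)
	# corresponding to a longitudinal displacement z (in m).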

	# Write the distn to a file only on one CPU
	comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
	if orbit_mpi.MPI_Comm_rank(comm) == 0:
		
		with open(output_file,"w") as fid:
			
			csv_writer = csv.writer(fid, delimiter=' ')
			for i in range(parameters['n_macroparticles']):
				phi[i] = -1 * z[i] * h_main / R
				# ~ (z[i], dE[i]) = Longitudinal_distribution.getCoordinates()
				# ~ z[i] = z[i] * speed_of_light * parameters['beta'] * 1e-9 # convert ns to s and then m
				# ~ dE[i] = dE[i] * 1e-3 # convert from MeV to GeV
				(x[i], xp[i], y[i], yp[i]) = Transverse_distribution.getCoordinates()
				x[i] += closedOrbitx['x0']
				xp[i] += closedOrbitx['xp0']
				y[i] += closedOrbity['y0']
				yp[i] += closedOrbity['yp0']
				dpp = dE[i] / (parameters['energy']) / parameters['beta']**2 * 1E9
				#print '\n dpp = ', dpp
				x[i] += dpp * dispersionx['etax0']
				xp[i] += dpp * dispersionx['etapx0']
				y[i] += dpp * dispersiony['etay0']
				yp[i] += dpp * dispersiony['etapy0']

				# convert to ORBIT units (mm, mrad); dE is already in GeV,
				# so no further scaling is applied regardless of outputFormat
				x[i] *= 1000.
				xp[i] *= 1000.
				y[i] *= 1000.
				yp[i] *= 1000.

			# ~ if outputFormat == 'Orbit':
			map(lambda i: csv_writer.writerow([x[i], xp[i], y[i], yp[i], phi[i], dE[i]]), range(parameters['n_macroparticles']))	
			# ~ elif outputFormat == 'pyOrbit':
				# ~ map(lambda i: csv_writer.writerow([x[i], xp[i], y[i], yp[i], z[i], dE[i]]), range(parameters['n_macroparticles']))	
				
		if summary_file:
			with open(summary_file, 'w') as fid:
				map(lambda key: fid.write(key + ' = ' + str(parameters[key]) + '\n'), parameters)
				
		print '\nCreated particle distribution with ' + str(parameters['n_macroparticles']) + ' macroparticles into file: ', output_file

	orbit_mpi.MPI_Barrier(comm)

	return output_file
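# Minimal usage sketch for generate_initial_distribution_from_BLonD_manual_Twiss:
# the dictionary keys are the ones read by the function above, while all
# numerical values, the BLonD .npz file name and the output file name are
# illustrative placeholders (the .npz file is assumed to hold 'dz' [m] and
# 'dE' [eV] arrays with at least n_macroparticles entries).
parameters = {
    'n_macroparticles': 50000,
    'harmonic_number': 1,
    'epsn_x': 2e-6, 'epsn_y': 2e-6,
    'TransverseCut': 5.0,
    'gamma': 1.012, 'beta': 0.154,
    'energy': 1.0e9,
    'BLonD_file': 'blond_output.npz',
}
TwissDict = {
    'alpha_x': 0.0, 'beta_x': 10.0, 'alpha_y': 0.0, 'beta_y': 10.0,
    'D_x': 0.0, 'D_xp': 0.0, 'D_y': 0.0, 'D_yp': 0.0,
    'x0': 0.0, 'xp0': 0.0, 'y0': 0.0, 'yp0': 0.0,
    'gamma_transition': 6.1,
    'circumference': 216.72,
    'length': 216.72,
}
ParticleDistribution = generate_initial_distribution_from_BLonD_manual_Twiss(
    parameters, TwissDict, output_file='ParticleDistribution.in')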
Example #20
def profiles(Bunch, coord, histogram, steps=100, Min=1.0, Max=-1.0):
    """
        Returns a profile for one of the following Bunch coordinates:
	x[m] xp[rad] y[m] yp[rad] z[m] dE[GeV]
	"""

    b = Bunch

    # Take the MPI Communicator from bunch: It could be
    # different from MPI_COMM_WORLD

    comm = Bunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - list with one entry per CPU,
    # containing the number of macroparticles on each CPU

    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)
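    # after the SUM Allreduce of the zero-initialised list, every rank holds
    # the complete table of macroparticle counts per CPU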

    partdat = []

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            if coord == "x":
                partdat.append(b.x(i))
            if coord == "px":
                partdat.append(b.px(i))
            if coord == "y":
                partdat.append(b.y(i))
            if coord == "py":
                partdat.append(b.py(i))
            if coord == "z":
                partdat.append(b.z(i))
            if coord == "dE":
                partdat.append(b.dE(i))

    # Just to be safe; strictly speaking, this MPI_Barrier call is not necessary.

    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

    for i_cpu in range(1, size):

        # Again, just to be safe; strictly speaking, this MPI_Barrier call is not necessary.
        orbit_mpi.MPI_Barrier(comm)

        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):

                # get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(
                    mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)
                if coord == "x":
                    partdat.append(x)
                if coord == "px":
                    partdat.append(px)
                if coord == "y":
                    partdat.append(y)
                if coord == "py":
                    partdat.append(py)
                if coord == "z":
                    partdat.append(z)
                if coord == "dE":
                    partdat.append(dE)

            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i)
                px = b.px(i)
                y = b.y(i)
                py = b.py(i)
                z = b.z(i)
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE,
                                   main_rank, 222, comm)

    # only the main rank has gathered the full coordinate list;
    # the other ranks are done at this point
    if rank != main_rank:
        return

    l = len(partdat)
    m = min(partdat)
    M = max(partdat)

    c = (M + m) / 2.0
    d = (M - m) * 1.1 / 2.0
    M = c + d
    m = c - d

    # the user supplied Min / Max (defaults 1.0 / -1.0) can only widen the
    # automatically determined range, never shrink it
    if Max > M:
        M = Max
    if Min < m:
        m = Min

    dx = (M - m) / steps

    # build the grid; the upper edge M appears twice so that the linear
    # deposit below can always spill into prof[i + 1] (the extra point is
    # not written to the output file)
    grid = [m]
    prof = [0]
    for i in range(1, steps + 1):
        x = m + i * dx
        grid.append(x)
        prof.append(0)
    grid.append(M)
    prof.append(0)

    # deposit each particle linearly onto its two neighbouring grid points
    for n in range(l):
        i = int((partdat[n] - m) / dx)
        if i < 0 or i > steps:
            # outside the histogram range
            pass
        else:
            frac = (partdat[n] - m) / dx % 1
            prof[i] = prof[i] + (1.0 - frac)
            prof[i + 1] = prof[i + 1] + frac

    sum = 0.0
    for i in range(steps + 1):
        sum = sum + prof[i]

    file_out = None
    if (rank == main_rank):
        file_out = open(histogram, "w")

        file_out.write("Min = " + str(m) + "  Max = " + \
                       str(M) + " steps = " + str(steps) + "\n")
        file_out.write("nParts = " + str(l) + " HistSum = " + \
                       str(sum) + "\n\n")

        for i in range(steps + 1):
            file_out.write(str(grid[i]) + "   " + \
                           str(prof[i]) + "\n")

    if (rank == main_rank):
        file_out.close()
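# Minimal usage sketch: write a 200-bin horizontal profile of an already
# filled Bunch object to a text file; "bunch" and the output file name are
# illustrative placeholders.
profiles(bunch, "x", "profile_x.dat", steps=200)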
Example #21
import os
import orbit_mpi

comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
rank = orbit_mpi.MPI_Comm_rank(comm)

#----------------------------------------------
# Create folder structure
#----------------------------------------------
from lib.mpi_helpers import mpi_mkdir_p
mpi_mkdir_p('input')
mpi_mkdir_p('output')
mpi_mkdir_p('lost')

#----------------------------------------------
# Generate Lattice (MADX + PTC)
#----------------------------------------------
if not rank:
	os.system("/afs/cern.ch/eng/sl/MAD-X/pro/releases/5.02.00/madx-linux64 < Input/SIS18.madx")
orbit_mpi.MPI_Barrier(comm)

#----------------------------------------------
# Initialize a Teapot-Style PTC lattice
#----------------------------------------------
PTC_File = "SIS_18_BENCHMARK.flt"
Lattice = PTC_Lattice("MACHINE")
Lattice.readPTC(PTC_File)
readScriptPTC('Input/time.ptc')

paramsDict = {}
paramsDict["length"] = Lattice.getLength() / Lattice.nHarm

#----------------------------------------------
# Add apertures
#----------------------------------------------
Example #22
def bunch_pyorbit_to_orbit(ringLength, pyOrbitBunch,
                           name_of_orbit_mpi_bunch_file):
    """
	Translates pyORBIT bunch to ORBIT_MPI bunch and dumps it into the file.
	The ring length should be defined in the input (in meters).
	ORBIT_MPI file has lines: x[mm] xp[mrad] y[mm] yp[mrad]   phi[rad]  dE[GeV].
	pyORBIT: x[m] xp[rad] y[m] yp[rad]  z[m]  dE[GeV]
	"""
    pi2 = 2.0 * math.pi
    L = ringLength
    b = pyOrbitBunch
    #take the MPI Communicator from bunch: it could be different from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - list with one entry per CPU,
    # containing the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT,
                                          mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            # map z[m] onto an RF phase in [-pi, pi]
            z = -(math.fmod(b.z(i) * pi2 / L, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(
                str(x) + " " + str(px) + " " + str(y) + " " + str(py) + " " +
                str(z) + " " + str(dE) + "\n")

    # Just to be safe; strictly speaking, this MPI_Barrier call is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        # Again, just to be safe; strictly speaking, this MPI_Barrier call is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                #get the coordinate array
                (x, px, y, py, z,
                 dE) = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222,
                                          comm)
                file_out.write(
                    str(x) + " " + str(px) + " " + str(y) + " " + str(py) +
                    " " + str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                #send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * pi2 / L, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, main_rank,
                                   222, comm)

    if (rank == main_rank):
        file_out.close()
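# Minimal usage sketch: dump an existing pyORBIT Bunch into an ORBIT_MPI text
# file; the ring length (in m), "bunch" and the output file name are
# illustrative placeholders.
bunch_pyorbit_to_orbit(216.72, bunch, "bunch_orbit_mpi.dat")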