def LinearRestoringForce(bunch, force):
    rank = 0
    numprocs = 1
    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)
    nparts_arr_local = []
    for i in range(numprocs):
        nparts_arr_local.append(0)
    nparts_arr_local[rank] = bunch.getSize()
    data_type = mpi_datatype.MPI_INT
    op = mpi_op.MPI_SUM
    nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op, comm)
    for i in range(bunch.getSize()):
        en = bunch.dE(i)
        en = en + bunch.z(i) * force
        bunch.dE(i, en)
    return
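# Usage sketch (not part of the original module): apply the linear longitudinal kick
# dE += z * force once per tracking turn. It assumes a TEAPOT-style lattice object
# with a trackBunch() method and a populated Bunch; the force value is illustrative only.
def track_with_restoring_force(lattice, bunch, n_turns, force=1.0e-6):
    for turn in range(n_turns):
        lattice.trackBunch(bunch)
        LinearRestoringForce(bunch, force)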
def getBunch(self, nParticles=0, distributorClass=WaterBagDist3D, cut_off=-1.):
    """
    Returns a pyORBIT bunch with the requested number of particles.
    """
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    data_type = mpi_datatype.MPI_DOUBLE
    main_rank = 0
    bunch = Bunch()
    self.bunch.copyEmptyBunchTo(bunch)
    macrosize = (self.beam_current * 1.0e-3 / self.bunch_frequency)
    macrosize /= (math.fabs(bunch.charge()) * self.si_e_charge)
    distributor = distributorClass(self.twiss[0], self.twiss[1], self.twiss[2], cut_off)
    bunch.getSyncParticle().time(0.)
    for i in range(nParticles):
        (x, xp, y, yp, z, dE) = distributor.getCoordinates()
        (x, xp, y, yp, z, dE) = orbit_mpi.MPI_Bcast((x, xp, y, yp, z, dE), data_type, main_rank, comm)
        if (i % size == rank):
            bunch.addParticle(x, xp, y, yp, z, dE)
    nParticlesGlobal = bunch.getSizeGlobal()
    bunch.macroSize(macrosize / nParticlesGlobal)
    return bunch
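# Usage sketch (not part of the original module): build bunches from an object that
# exposes the getBunch() method above. `beam_generator` is a hypothetical instance that
# already carries self.twiss, self.bunch, self.beam_current and self.bunch_frequency;
# WaterBagDist3D (the default) and GaussDist3D are the pyORBIT distributor classes.
def make_initial_bunches(beam_generator, n_macroparticles=10000):
    # default water-bag distribution
    bunch_wb = beam_generator.getBunch(nParticles=n_macroparticles)
    # Gaussian distribution truncated at 3 sigma
    bunch_gauss = beam_generator.getBunch(nParticles=n_macroparticles,
                                          distributorClass=GaussDist3D, cut_off=3.)
    return bunch_wb, bunch_gauss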
def bunch_from_matfile(matfile):
    d = sio.loadmat(matfile, squeeze_me=True)
    p = dict((key, value) for (key, value) in map(
        lambda k: (k, d['particles'][k][()]), d['particles'].dtype.names))
    attributes = list(set(p) - set(['x', 'xp', 'y', 'yp', 'z', 'dE']))
    attributes.sort(key=str.lower)

    bunch = Bunch()
    bunch.classicalRadius(d['bunchparameters']['classical_radius'])
    bunch.charge(d['bunchparameters']['charge'])
    bunch.mass(d['bunchparameters']['mass'])
    bunch.getSyncParticle().momentum(d['bunchparameters']['momentum'])
    bunch.getSyncParticle().time(d['bunchparameters']['time'])

    x = np.atleast_1d(d['particles']['x'][()])
    xp = np.atleast_1d(d['particles']['xp'][()])
    y = np.atleast_1d(d['particles']['y'][()])
    yp = np.atleast_1d(d['particles']['yp'][()])
    z = np.atleast_1d(d['particles']['z'][()])
    dE = np.atleast_1d(d['particles']['dE'][()])
    n_part = len(x)

    import orbit_mpi
    comm = bunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)

    # distribute the particles evenly over the MPI ranks
    count = n_part / size
    remainder = n_part % size
    if (rank < remainder):
        i_start = rank * (count + 1)
        i_stop = i_start + count + 1
    else:
        i_start = rank * count + remainder
        i_stop = i_start + count
    # print rank, i_start, i_stop

    map(lambda i: bunch.addParticle(x[i], xp[i], y[i], yp[i], z[i], dE[i]),
        xrange(i_start, i_stop))
    orbit_mpi.MPI_Barrier(comm)

    for a in attributes:
        bunch.addPartAttr(a)
        a_size = bunch.getPartAttrSize(a)
        if a_size > 1:
            for j in xrange(a_size):
                map(lambda (ip, i): bunch.partAttrValue(a, ip, j, np.atleast_1d(p[a][j])[i]),
                    enumerate(xrange(i_start, i_stop)))
        else:
            map(lambda (ip, i): bunch.partAttrValue(a, ip, 0, np.atleast_1d(p[a])[i]),
                enumerate(xrange(i_start, i_stop)))
    orbit_mpi.MPI_Barrier(comm)
    return bunch
def BeamEmittances(bunch, beta):
    com = bunch.getMPIComm()
    mpi_size = orbit_mpi.MPI_Comm_size(com)
    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE

    N_part_loc = bunch.getSize()
    N_part_glob = bunch.getSizeGlobal()

    P = 0
    for i in range(N_part_loc):
        P += bunch.pz(i)
    P = orbit_mpi.MPI_Allreduce(P, data_type, op, com)
    P = P / N_part_glob

    XP0 = 0
    YP0 = 0
    for i in range(N_part_loc):
        XP0 += bunch.px(i) / bunch.pz(i)
        YP0 += bunch.py(i) / bunch.pz(i)
    XP0 = orbit_mpi.MPI_Allreduce(XP0, data_type, op, com)
    YP0 = orbit_mpi.MPI_Allreduce(YP0, data_type, op, com)
    XP0 = XP0 / N_part_glob
    YP0 = YP0 / N_part_glob

    XP2 = 0
    YP2 = 0
    for i in range(N_part_loc):
        XP = bunch.px(i) / bunch.pz(i) - XP0
        YP = bunch.py(i) / bunch.pz(i) - YP0
        XP2 += XP * XP
        YP2 += YP * YP
    XP2 = orbit_mpi.MPI_Allreduce(XP2, data_type, op, com)
    YP2 = orbit_mpi.MPI_Allreduce(YP2, data_type, op, com)
    XP2 = XP2 / N_part_glob
    YP2 = YP2 / N_part_glob

    ex = XP2 * beta
    ey = YP2 * beta

    E = math.sqrt(P * P + bunch.mass() * bunch.mass())
    beta_rel = P / E
    gamma_rel = 1. / math.sqrt(1 - beta_rel * beta_rel)

    exn = ex * beta_rel * gamma_rel
    eyn = ey * beta_rel * gamma_rel
    return exn, eyn
def getKinEnergy(bunch):
    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE
    mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)
    size = bunch.getSize()
    pz = 0
    for i in range(size):
        pz += bunch.pz(i)
    pz /= size
    pz = orbit_mpi.MPI_Allreduce(pz, data_type, op, mpi_comm.MPI_COMM_WORLD)
    pz /= mpi_size
    Tk = math.sqrt(bunch.mass()**2 + pz**2) - bunch.mass()
    return Tk
def addParticleIdNumbers(b, fixedidnumber=-1, part_ind=0):
    rank = 0
    numprocs = 1
    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)
    nparts_arr_local = []
    for i in range(numprocs):
        nparts_arr_local.append(0)
    nparts_arr_local[rank] = b.getSize()
    data_type = mpi_datatype.MPI_INT
    op = mpi_op.MPI_SUM
    nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op, comm)
    if (b.hasPartAttr("ParticleIdNumber") == 0):
        b.addPartAttr("ParticleIdNumber")
    if (fixedidnumber >= 0):
        for i in range(part_ind, b.getSize()):
            b.partAttrValue("ParticleIdNumber", i, 0, fixedidnumber)
    else:
        istart = 0
        if (rank == 0):
            istart = 0
        else:
            for i in range(rank):
                istart = istart + nparts_arr[i]
        for i in range(b.getSize()):
            idnumber = istart + i
            b.partAttrValue("ParticleIdNumber", i, 0, idnumber)
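# Usage sketch (not part of the original module): tag every macroparticle with a unique,
# MPI-global id before tracking so particles can be followed across turns and ranks.
# `bunch` is assumed to be an already populated Bunch instance.
def tag_bunch(bunch):
    addParticleIdNumbers(bunch)               # sequential ids 0 .. N_global-1
    # or give newly added particles one fixed id, e.g. to mark an injection turn:
    # addParticleIdNumbers(bunch, fixedidnumber=5, part_ind=first_new_index)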
def getBunch(self, nParticles, twissX, twissY, twissZ, cut_off=-1.):
    """
    Returns a pyORBIT bunch with the requested number of particles.
    """
    (x0, xp0, y0, yp0, z0, dE0) = self.init_coords
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    data_type = mpi_datatype.MPI_DOUBLE
    main_rank = 0
    bunch = Bunch()
    self.bunch.copyEmptyBunchTo(bunch)
    macrosize = (self.beam_current * 1.0e-3 / self.bunch_frequency)
    macrosize /= (math.fabs(bunch.charge()) * self.si_e_charge)
    distributor = GaussDist3D(twissX, twissY, twissZ, cut_off)
    bunch.getSyncParticle().time(0.)
    for i in range(nParticles):
        (x, xp, y, yp, z, dE) = distributor.getCoordinates()
        (x, xp, y, yp, z, dE) = orbit_mpi.MPI_Bcast((x, xp, y, yp, z, dE), data_type, main_rank, comm)
        if (i % size == rank):
            bunch.addParticle(x + x0, xp + xp0, y + y0, yp + yp0, z + z0, dE + dE0)
    nParticlesGlobal = bunch.getSizeGlobal()
    bunch.macroSize(macrosize / nParticlesGlobal)
    return bunch
def saveBunchAsMatfile(bunch, filename=None):
    b = bunch
    # take the MPI Communicator from the bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array with one entry per CPU,
    # holding the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[] for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(lambda i: b.partAttrValue(attribute, i, j), mp_array)

    # This is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                # get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT, i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(np.atleast_1d(
                        orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)))
            elif (rank == i_cpu):
                # send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT, main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT, i_cpu, 222, comm)
                if bunch_size_remote:
                    # get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT, main_rank, 222, comm)
                if bunch_size_local:
                    # send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j], mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(),
                       'charge': bunch.charge(), 'mass': bunch.mass(),
                       'momentum': bunch.getSyncParticle().momentum(),
                       'beta': bunch.getSyncParticle().beta(),
                       'gamma': bunch.getSyncParticle().gamma(),
                       'time': bunch.getSyncParticle().time()}

    if filename:
        if rank == main_rank:
            sio.savemat(filename + '.mat',
                        {'particles': particles, 'bunchparameters': bunchparameters},
                        do_compression=True)
    orbit_mpi.MPI_Barrier(comm)
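# Usage sketch (not part of the original module): round-trip a bunch through a .mat file
# with bunch_from_matfile() and saveBunchAsMatfile() above. The file name is illustrative;
# note that saveBunchAsMatfile() appends the '.mat' extension itself, while
# bunch_from_matfile() takes the full file name.
def checkpoint_roundtrip(bunch):
    saveBunchAsMatfile(bunch, 'bunch_turn_0001')          # writes bunch_turn_0001.mat on rank 0
    restored = bunch_from_matfile('bunch_turn_0001.mat')  # rebuilt and redistributed over MPI ranks
    return restored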
def addParticles(self):
    (xmin, xmax, ymin, ymax) = self.injectregion

    #adjusts number of particles injected according to varying pattern width
    if self.lDistFunc.name == "JohoLongitudinalPaint":
        self.lDistFunc.getCoordinates()
        self.npartsfloat = self.lDistFunc.frac_change * self.npartsfloat
        self.nparts = int(round(self.npartsfloat))

    rank = 0
    numprocs = 1
    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)

    nPartsGlobal = self.bunch.getSizeGlobal()
    if (self.nmaxmacroparticles > 0):
        if (nPartsGlobal >= self.nmaxmacroparticles):
            return
    #if((nTurnsDone % injectTurnInterval) != 0):
    #    return

    x_rank0 = []
    xp_rank0 = []
    y_rank0 = []
    yp_rank0 = []
    z_rank0 = []
    dE_rank0 = []
    ninjected_rank0 = 0
    x_local = []
    xp_local = []
    y_local = []
    yp_local = []
    z_local = []
    dE_local = []
    ninjectedlocal = 0

    if (rank == 0):
        for i in xrange(int(self.nparts)):
            (x, px) = self.xDistFunc.getCoordinates()
            (y, py) = self.yDistFunc.getCoordinates()
            (z, dE) = self.lDistFunc.getCoordinates()
            if ((x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)):
                ninjectedlocal = ninjectedlocal + 1
                x_rank0.append(x)
                xp_rank0.append(px)
                y_rank0.append(y)
                yp_rank0.append(py)
                z_rank0.append(z)
                dE_rank0.append(dE)
                #self.bunch.addParticle(x, px, y, py, z, dE)
            else:
                self.lostbunch.addParticle(x, px, y, py, z, dE)
        #self.bunch.compress()

    ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT, 0, comm)
    x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)

    n_remainder = ninjected % numprocs
    n_inj_local = ninjected / numprocs

    #inject an equal number of particles on each CPU
    i_start = rank * n_inj_local
    i_stop = (rank + 1) * n_inj_local
    for i in xrange(i_start, i_stop):
        self.bunch.addParticle(x_local[i], xp_local[i], y_local[i], yp_local[i], z_local[i], dE_local[i])

    #distribute the remaining particles to randomly chosen CPUs; the random choice
    #is broadcast from rank 0 so that every rank agrees on the destination
    n_max_index = numprocs * n_inj_local
    for i in xrange(n_remainder):
        i_cpu = int(numprocs * random.random())
        i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
        if (rank == i_cpu):
            self.bunch.addParticle(x_local[i + n_max_index], xp_local[i + n_max_index],
                                   y_local[i + n_max_index], yp_local[i + n_max_index],
                                   z_local[i + n_max_index], dE_local[i + n_max_index])
            n_inj_local = n_inj_local + 1
    #nInjectedMacros += n_inj_local

    self.bunch.compress()
    self.lostbunch.compress()
def BunchGather(bunch, turn, p, plot_footprint=False):
    b = bunch
    verbose = False

    # take the MPI Communicator from the bunch: it could be different from MPI_COMM_WORLD
    comm = b.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array with one entry per CPU,
    # holding the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)
    if verbose:
        print 'BunchGather:: bunch_size on MPI Rank: ', rank, ' = ', n_parts_arr[rank]
        print 'BunchGather:: n_parts_arr on MPI Rank: ', rank, ' = ', n_parts_arr

    mp_array = range(n_parts_arr[rank])
    particles = {}
    particles['x'] = map(b.x, mp_array)
    particles['xp'] = map(b.xp, mp_array)
    particles['y'] = map(b.y, mp_array)
    particles['yp'] = map(b.yp, mp_array)
    particles['z'] = map(b.z, mp_array)
    particles['dE'] = map(b.dE, mp_array)
    phase_space_keys = particles.keys()

    for attribute in b.getPartAttrNames():
        particles[attribute] = [[] for i in range(b.getPartAttrSize(attribute))]
        for j in xrange(b.getPartAttrSize(attribute)):
            particles[attribute][j] += map(lambda i: b.partAttrValue(attribute, i, j), mp_array)

    # This is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    for i_cpu in range(1, size):
        for key in phase_space_keys:
            if (rank == main_rank):
                # get the particle coordinates and attributes
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT, i_cpu, 222, comm)
                if bunch_size_remote:
                    particles[key] += list(np.atleast_1d(
                        orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)))
            elif (rank == i_cpu):
                # send the coordinate array if there are any particles ...
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT, main_rank, 222, comm)
                if bunch_size_local:
                    orbit_mpi.MPI_Send(particles[key], mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    for i_cpu in range(1, size):
        for attribute in b.getPartAttrNames():
            if (rank == main_rank):
                bunch_size_remote = orbit_mpi.MPI_Recv(mpi_datatype.MPI_INT, i_cpu, 222, comm)
                if bunch_size_remote:
                    # get the particle coordinates and attributes
                    for j in xrange(b.getPartAttrSize(attribute)):
                        particles[attribute][j] += list(np.atleast_1d(
                            orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)))
            elif (rank == i_cpu):
                bunch_size_local = bunch.getSize()
                orbit_mpi.MPI_Send(bunch_size_local, mpi_datatype.MPI_INT, main_rank, 222, comm)
                if bunch_size_local:
                    # send the coordinate array if there are any particles ...
                    for j in xrange(b.getPartAttrSize(attribute)):
                        orbit_mpi.MPI_Send(particles[attribute][j], mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    bunchparameters = {'classical_radius': bunch.classicalRadius(),
                       'charge': bunch.charge(), 'mass': bunch.mass(),
                       'momentum': bunch.getSyncParticle().momentum(),
                       'beta': bunch.getSyncParticle().beta(),
                       'gamma': bunch.getSyncParticle().gamma(),
                       'time': bunch.getSyncParticle().time()}

    ########################################################################
    #               Plot tune footprint with histograms                    #
    ########################################################################
    if rank == main_rank:
        # ~ print 'Rank: ', rank
        if turn >= 0:
            # ~ print 'Turn: ', turn
            if plot_footprint:
                if verbose:
                    print 'BunchGather:: Plot tune footprint on rank', rank

                tunex = str(p['tunex'][0] + '.' + p['tunex'][1:])
                tuney = str(p['tuney'][0] + '.' + p['tuney'][1:])
                tunex_sav = str(p['tunex'][0] + 'p' + p['tunex'][1:])
                tuney_sav = str(p['tuney'][0] + 'p' + p['tuney'][1:])
                fontsize = 15

                qx = np.array(particles['ParticlePhaseAttributes'][2])
                qy = np.array(particles['ParticlePhaseAttributes'][3])
                qx[np.where(qx > 0.5)] -= 1
                qy[np.where((qy > 0.6) & (qx < 0.25))] -= 1

                print 'resonances'
                resonances = resonance_lines((5.75, 6.25), (5.75, 6.25), (1, 2, 3, 4), 10)
                fontsize = 17

                f, ax = plt.subplots(1, figsize=(6, 6))
                gridspec.GridSpec(3, 3)
                #f.subplots_adjust(hspace = 0)  # Horizontal spacing between subplots
                f.subplots_adjust(wspace=0)     # Vertical spacing between subplots

                my_cmap = plt.cm.jet
                my_cmap.set_under('w', 1)
                r = resonances

                print 'title'
                title = str(tunex_sav + ' ' + tuney_sav + ' turn ' + str(turn))

                # First subplot
                print 'plot1'
                plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=1)
                plt.hist(6 + qx, bins=1000, range=(r.Qx_min, r.Qx_max))  #, norm=mcolors.PowerNorm(gamma))
                plt.ylabel('Frequency')
                plt.grid(which='both')
                plt.title(title, fontsize=fontsize)

                # Main plot
                print 'plot2'
                plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
                plt.hist2d(6 + qx, 6 + qy, bins=1000, cmap=my_cmap, vmin=1,
                           range=[[r.Qx_min, r.Qx_max], [r.Qy_min, r.Qy_max]])  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel(r'Q$_x$')
                plt.ylabel(r'Q$_y$')
                print 'plot_resonance'
                resonances.plot_resonance(f)

                # Second subplot
                print 'plot3'
                plt.subplot2grid((3, 3), (1, 2), colspan=1, rowspan=2)
                plt.hist(6 + qy, bins=1000, range=(r.Qy_min, r.Qy_max),
                         orientation=u'horizontal')  #, norm=mcolors.PowerNorm(gamma))
                plt.xlabel('Frequency')
                plt.grid(which='both')

                current_axis = plt.gca()
                #current_axis.axes.get_yaxis().set_visible(False)
                ax.xaxis.label.set_size(fontsize)
                ax.yaxis.label.set_size(fontsize)
                ax.tick_params(labelsize=fontsize)
                plt.tight_layout()
                savename = str('Tune_Footprints/' + tunex_sav + '_' + tuney_sav + '_turn_' + str(turn) + '_hist.png')
                print 'savefig'
                f.savefig(savename, dpi=100)
                plt.close(f)

    outputs = dict()
    if rank == main_rank:
        x = np.array(particles['x'])
        xp = np.array(particles['xp'])
        y = np.array(particles['y'])
        yp = np.array(particles['yp'])
        z = np.array(particles['z'])
        dE = np.array(particles['dE'])

        mu_x = moment(x, 2)
        mu_xp = moment(xp, 2)
        mu_y = moment(y, 2)
        mu_yp = moment(yp, 2)
        mu_z = moment(z, 2)
        mu_dE = moment(dE, 2)
        sig_x = np.sqrt(mu_x)
        sig_xp = np.sqrt(mu_xp)
        sig_y = np.sqrt(mu_y)
        sig_yp = np.sqrt(mu_yp)
        sig_z = np.sqrt(mu_z)
        sig_dE = np.sqrt(mu_dE)

        x_6_sig = x[np.where((x >= -6 * sig_x) & (x <= 6 * sig_x))]
        xp_6_sig = xp[np.where((xp >= -6 * sig_xp) & (xp <= 6 * sig_xp))]
        y_6_sig = y[np.where((y >= -6 * sig_y) & (y <= 6 * sig_y))]
        yp_6_sig = yp[np.where((yp >= -6 * sig_yp) & (yp <= 6 * sig_yp))]
        z_6_sig = z[np.where((z >= -6 * sig_z) & (z <= 6 * sig_z))]
        dE_6_sig = dE[np.where((dE >= -6 * sig_dE) & (dE <= 6 * sig_dE))]

        # Later add something to cut large amplitude particles to reduce noise for the kurtosis calculation
        outputs = {
            'Mu_x': mu_x, 'Mu_xp': mu_xp, 'Mu_y': mu_y, 'Mu_yp': mu_yp, 'Mu_z': mu_z, 'Mu_dE': mu_dE,
            'Sig_x': sig_x, 'Sig_xp': sig_xp, 'Sig_y': sig_y, 'Sig_yp': sig_yp, 'Sig_z': sig_z, 'Sig_dE': sig_dE,
            'Max_x': np.max(x), 'Max_xp': np.max(xp), 'Max_y': np.max(y), 'Max_yp': np.max(yp), 'Max_z': np.max(z), 'Max_dE': np.max(dE),
            'Min_x': np.min(x), 'Min_xp': np.min(xp), 'Min_y': np.min(y), 'Min_yp': np.min(yp), 'Min_z': np.min(z), 'Min_dE': np.min(dE),
            'Kurtosis_x': kurtosis(x, fisher=True, nan_policy='omit'),
            'Kurtosis_xp': kurtosis(xp, fisher=True, nan_policy='omit'),
            'Kurtosis_y': kurtosis(y, fisher=True, nan_policy='omit'),
            'Kurtosis_yp': kurtosis(yp, fisher=True, nan_policy='omit'),
            'Kurtosis_z': kurtosis(z, fisher=True, nan_policy='omit'),
            'Kurtosis_dE': kurtosis(dE, fisher=True, nan_policy='omit'),
            'Kurtosis_x_6sig': kurtosis(x_6_sig, fisher=True, nan_policy='omit'),
            'Kurtosis_xp_6sig': kurtosis(xp_6_sig, fisher=True, nan_policy='omit'),
            'Kurtosis_y_6sig': kurtosis(y_6_sig, fisher=True, nan_policy='omit'),
            'Kurtosis_yp_6sig': kurtosis(yp_6_sig, fisher=True, nan_policy='omit'),
            'Kurtosis_z_6sig': kurtosis(z_6_sig, fisher=True, nan_policy='omit'),
            'Kurtosis_dE_6sig': kurtosis(dE_6_sig, fisher=True, nan_policy='omit')
        }

    return outputs
def population(self):
    bunch_target = self.bunch_target
    mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)
    bunch = self.bunch
    N_part = bunch.getSize()
    TK = self.TK
    n_step = self.n_step
    power = self.power
    n_states = self.n_states
    cut_par = self.cut_par
    sigmaZ_beam = self.sigmaZ_beam
    env_sigma = self.env_sigma
    Nevol = self.Nevol
    print_file = self.print_file
    if (self.method == 2):
        St = self.St
    if (self.method == 3):
        StSf = self.StSf
    if (self.method == 4):
        continuum_spectr = self.continuum_spectr
    fS = self.fS
    la = self.la
    fx = self.fx
    fy = self.fy
    wx = self.wx
    wy = self.wy
    rx = self.rx
    ry = self.ry
    ax = self.ax
    ay = self.ay
    method = self.method
    Bx = self.Bx
    By = self.By
    Bz = self.Bz

    ####### Here are defined parameters of the function ###############
    E = bunch.mass() + TK
    P = math.sqrt(E * E - bunch.mass() * bunch.mass())
    vz = 299792458 * P / E

    fS = ConstEMfield(0., 0., 0., Bx, 0., 0.)

    bunch_target.deleteAllParticles()
    bunch.copyBunchTo(bunch_target)

    if (method == 1):
        dip_transition = math.sqrt(256 * math.pow(n_states, 7) * math.pow(n_states - 1, 2 * n_states - 5) / 3 / math.pow(n_states + 1, 2 * n_states + 5))
        delta_E = 1. / 2. - 1. / (2. * n_states * n_states)
    if (method == 2):
        delta_E = 1. / 2. - 1. / (2. * n_states * n_states)
    if (method == 3):
        delta_E = StSf.deltaE(bunch.mass(), 0., 0., 0., Bx, By, Bz, 0., 0., P)
    if (method == 4):
        delta_E = continuum_spectr.setField_returndE(bunch.mass(), 0., 0., 0., Bx, By, Bz, 0., 0., P)

    la0 = 2 * math.pi * 5.291772108e-11 / 7.297352570e-3 / delta_E
    te = TK - bunch.mass() * (la / la0 - 1)
    kz = te / math.sqrt(P * P - te * te)
    #kz = math.tan(2*math.pi*(39.23-90)/360)
    print "angle = ", math.atan2(1, -kz) * 360 / 2 / math.pi, "kz = ", kz
    #kz = -1.22020387566
    #print sigmaZ_beam

    zb = -5 * sigmaZ_beam
    zl = zb * E / P
    time_step = (2 * abs(zb) / vz) / n_step

    for i in range(N_part):
        z = bunch_target.z(i)
        bunch_target.z(i, z + zb)
    #bunch_target.dumpBunch("bunch_ini"+str(count)+".dat")

    LFS = HermiteGaussianLFmode(math.sqrt(power), 0, 0, abs(wx), abs(wy), fx, fy, la, zl, env_sigma)
    LFS.setLocalParameters(abs(rx), abs(ry), ax, ay)
    LFS.setLaserFieldOrientation(0., 0., 0., -1., 0., kz, kz, 0., 1., kz, 0., 1.)   #perpendicular polarization
    #LFS.setLaserFieldOrientation(0.,0.,0., -1.,0.,kz, kz,0.,1., 0.,1.,0.)          #parallel polarization

    tracker = RungeKuttaTracker(0)

    if (method == 1):
        eff = TwoLevelAtom(LFS, delta_E, dip_transition)
    if (method == 2):
        eff = SchrodingerEquation(LFS, St, cut_par)
    if (method == 3):
        eff = TwoLevelStrongField(LFS, StSf)
    if (method == 4):
        eff = ContinuumSS(LFS, continuum_spectr)

    cont_eff = ExtEffectsContainer()
    cont_eff.AddEffect(eff)

    if (print_file):
        pr = PrintExtEffects("Populations", n_step, os.environ["ORBIT_ROOT"] + "/ext/laserstripping/working_dir/" + "/data3.0")
        cont_eff.AddEffect(pr)

    if (Nevol != 0):
        evo = RecordEvolution("Populations", 0, Nevol)
        cont_eff.AddEffect(evo)

    tracker.track(bunch_target, 0, time_step * n_step, time_step, fS, cont_eff)

    #for i in range(N_part):
    #    z = bunch_target.z(i)
    #    bunch_target.z(i, z - vz*time_step*n_step)
    #tracker.track(bunch_target, 0, time_step*n_step, time_step, fS, cont_eff)

    population = 0.
    population2 = 0.
    p_ioniz = 0.
    p_ioniz2 = 0.

    for i in range(N_part):
        if (method != 4):
            val = 1 - bunch_target.partAttrValue("Populations", i, 0) - bunch_target.partAttrValue("Populations", i, 1)
            p_val = bunch_target.partAttrValue("Populations", i, 0)
            population += val
            population2 += val * val
            p_ioniz += p_val
            p_ioniz2 += p_val * p_val
        else:
            val = 1 - bunch_target.partAttrValue("Populations", i, 0)
            p_val = 1 - bunch_target.partAttrValue("Populations", i, 0)
            population += val
            population2 += val * val
            p_ioniz += p_val
            p_ioniz2 += p_val * p_val

    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE
    population = orbit_mpi.MPI_Allreduce(population, data_type, op, mpi_comm.MPI_COMM_WORLD)
    population2 = orbit_mpi.MPI_Allreduce(population2, data_type, op, mpi_comm.MPI_COMM_WORLD)
    p_ioniz = orbit_mpi.MPI_Allreduce(p_ioniz, data_type, op, mpi_comm.MPI_COMM_WORLD)
    p_ioniz2 = orbit_mpi.MPI_Allreduce(p_ioniz2, data_type, op, mpi_comm.MPI_COMM_WORLD)

    population = population / (mpi_size * N_part)
    p_ioniz = p_ioniz / (mpi_size * N_part)

    sigma_pop = 0.
    sigma_p_ioniz = 0.
    if (N_part * mpi_size > 1):
        sigma_pop = math.sqrt((population2 - N_part * mpi_size * population * population) / (N_part * mpi_size * (N_part * mpi_size - 1)))
        sigma_p_ioniz = math.sqrt((p_ioniz2 - N_part * mpi_size * p_ioniz * p_ioniz) / (N_part * mpi_size * (N_part * mpi_size - 1)))

    return population, sigma_pop, p_ioniz, sigma_p_ioniz
def bunch_pyorbit_to_orbit_nHarm(ringLength, nHarm, pyOrbitBunch, name_of_orbit_mpi_bunch_file):
    """
    Translates a pyORBIT bunch to an ORBIT_MPI bunch, incorporating the RF harmonic
    number, and dumps it into a file. The ring length should be defined in the input
    (in meters).
    Lines in the bunch files:
    ORBIT_MPI: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV]
    pyORBIT:   x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
    """
    pi2 = 2.0 * math.pi
    zfac = pi2 * nHarm / ringLength
    b = pyOrbitBunch

    # Take the MPI Communicator from the bunch: it could be different
    # from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of size equal to the number of CPUs,
    # containing the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            z = -(math.fmod(b.z(i) * zfac, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(str(x) + " " + str(px) + " " +
                           str(y) + " " + str(py) + " " +
                           str(z) + " " + str(dE) + "\n")

    # The MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        # Again, the MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                # Get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)
                file_out.write(str(x) + " " + str(px) + " " +
                               str(y) + " " + str(py) + " " +
                               str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                # send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * zfac, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    if (rank == main_rank):
        file_out.close()
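# Usage sketch (not part of the original module): dump a pyORBIT bunch in the ORBIT_MPI
# text format for a ring operated at RF harmonic h = 2. The circumference, harmonic
# number and file name are illustrative only.
def dump_for_orbit_mpi_nharm(bunch):
    ring_length = 157.08   # m, illustrative
    n_harm = 2             # RF harmonic number, illustrative
    bunch_pyorbit_to_orbit_nHarm(ring_length, n_harm, bunch, 'bunch_orbit_mpi_h2.dat')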
from spacecharge import SpaceChargeCalc2p5D, Boundary2D
from orbit.aperture import TeapotApertureNode, CircleApertureNode, EllipseApertureNode, RectangleApertureNode
from orbit.aperture import addTeapotApertureNode
from bunch import BunchTwissAnalysis
from orbit.diagnostics import TeapotStatLatsNode, TeapotMomentsNode, TeapotTuneAnalysisNode
from orbit.diagnostics import addTeapotDiagnosticsNode
from orbit.rf_cavities import RFNode, RFLatticeModifications
import orbit_mpi
from orbit_mpi import mpi_datatype
from orbit_mpi import mpi_op
import orbit

comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
rank = orbit_mpi.MPI_Comm_rank(comm)
size = orbit_mpi.MPI_Comm_size(comm)


def addAperture2(lattice):
    addTeapotApertureNode(lattice, 0, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 0.5, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 1.5, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 2.1, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 2.16, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 3.05, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 3.6, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 3.84, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 4.41, CircleApertureNode(0.025))
    addTeapotApertureNode(lattice, 5.610940777, CircleApertureNode(0.025))
def Freq_spread(bunch, la, n):
    delta_E = 1. / 2. - 1. / (2. * n * n)
    m = bunch.mass()

    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE
    mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)

    N = bunch.getSize()
    pz = 0
    for i in range(N):
        pz += bunch.pz(i)
    pz = orbit_mpi.MPI_Allreduce(pz, data_type, op, mpi_comm.MPI_COMM_WORLD)
    pz = pz / (mpi_size * N)

    E = math.sqrt(pz * pz + m * m)
    TK = E - m

    la0 = 2 * math.pi * 5.291772108e-11 / 7.297352570e-3 / delta_E
    te = TK - m * (la / la0 - 1)
    kzz = te / math.sqrt(pz * pz - te * te)

    kx = -1.
    ky = 0.
    kz = kzz

    om = 0
    om2 = 0
    for i in range(N):
        px = bunch.px(i)
        py = bunch.py(i)
        pz = bunch.pz(i)
        P2 = px * px + py * py + pz * pz
        K = math.sqrt(kx * kx + ky * ky + kz * kz)
        P = math.sqrt(P2)
        E = math.sqrt(P2 + m * m)
        beta = P / E
        gamma = E / m
        # cosine of the angle between the laser wave vector k and the particle momentum p
        cos = (px * kx + py * ky + pz * kz) / (K * P)
        la0 = la / (gamma * (1 - beta * cos))
        omega = 2 * math.pi * 5.291772108e-11 / 7.297352570e-3 / la0
        om += omega
        om2 += omega * omega
        #f = open('bunch_parameters.txt','a')
        #print >>f, omega
        #f.close()

    om = orbit_mpi.MPI_Allreduce(om, data_type, op, mpi_comm.MPI_COMM_WORLD)
    om = om / (mpi_size * N)
    om2 = orbit_mpi.MPI_Allreduce(om2, data_type, op, mpi_comm.MPI_COMM_WORLD)
    om2 = om2 / (mpi_size * N)

    sigma_om = math.sqrt(om2 - om**2)
    return (om, sigma_om)
def xyBeamEmittances(bunch):
    com = bunch.getMPIComm()
    mpi_size = orbit_mpi.MPI_Comm_size(com)
    op = mpi_op.MPI_SUM
    data_type = mpi_datatype.MPI_DOUBLE

    N_part_loc = bunch.getSize()
    N_part_glob = bunch.getSizeGlobal()

    P = 0
    for i in range(N_part_loc):
        P += bunch.pz(i)
    P = orbit_mpi.MPI_Allreduce(P, data_type, op, com)
    P = P / N_part_glob

    XP0 = 0
    YP0 = 0
    X0 = 0
    Y0 = 0
    for i in range(N_part_loc):
        XP0 += bunch.px(i) / bunch.pz(i)
        YP0 += bunch.py(i) / bunch.pz(i)
        X0 += bunch.x(i)
        Y0 += bunch.y(i)
    XP0 = orbit_mpi.MPI_Allreduce(XP0, data_type, op, com)
    YP0 = orbit_mpi.MPI_Allreduce(YP0, data_type, op, com)
    X0 = orbit_mpi.MPI_Allreduce(X0, data_type, op, com)
    Y0 = orbit_mpi.MPI_Allreduce(Y0, data_type, op, com)
    XP0 = XP0 / N_part_glob
    YP0 = YP0 / N_part_glob
    X0 = X0 / N_part_glob
    Y0 = Y0 / N_part_glob

    XP2 = 0
    YP2 = 0
    X2 = 0
    Y2 = 0
    PXP = 0
    PYP = 0
    for i in range(N_part_loc):
        XP = bunch.px(i) / bunch.pz(i) - XP0
        YP = bunch.py(i) / bunch.pz(i) - YP0
        X = bunch.x(i) - X0
        Y = bunch.y(i) - Y0
        XP2 += XP * XP
        YP2 += YP * YP
        X2 += X * X
        Y2 += Y * Y
        PXP += XP * X
        PYP += YP * Y
    XP2 = orbit_mpi.MPI_Allreduce(XP2, data_type, op, com)
    YP2 = orbit_mpi.MPI_Allreduce(YP2, data_type, op, com)
    X2 = orbit_mpi.MPI_Allreduce(X2, data_type, op, com)
    Y2 = orbit_mpi.MPI_Allreduce(Y2, data_type, op, com)
    PXP = orbit_mpi.MPI_Allreduce(PXP, data_type, op, com)
    PYP = orbit_mpi.MPI_Allreduce(PYP, data_type, op, com)
    XP2 = XP2 / N_part_glob
    YP2 = YP2 / N_part_glob
    X2 = X2 / N_part_glob
    Y2 = Y2 / N_part_glob
    PXP = PXP / N_part_glob
    PYP = PYP / N_part_glob

    ex = math.sqrt(X2 * XP2 - PXP * PXP)
    ey = math.sqrt(Y2 * YP2 - PYP * PYP)

    E = math.sqrt(P * P + bunch.mass() * bunch.mass())
    beta_rel = P / E
    gamma_rel = 1. / math.sqrt(1 - beta_rel * beta_rel)

    exn = ex * beta_rel * gamma_rel
    eyn = ey * beta_rel * gamma_rel

    print beta_rel * gamma_rel

    return exn, eyn
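# Usage sketch (not part of the original module): report the normalized transverse rms
# emittances of a tracked bunch; `bunch` is assumed to be a populated Bunch carrying
# the (x, px), (y, py), pz coordinates used above.
def report_emittances(bunch):
    exn, eyn = xyBeamEmittances(bunch)
    print "normalized rms emittances: ex =", exn, " ey =", eyn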
def profiles(Bunch, coord, histogram, steps=100, Min=1.0, Max=-1.0):
    """
    Writes a profile (histogram file) for one of the following Bunch coordinates:
    x[m] xp[rad] y[m] yp[rad] z[m] dE[GeV]
    """
    b = Bunch

    # Take the MPI Communicator from the bunch: it could be
    # different from MPI_COMM_WORLD
    comm = Bunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of size equal to the number of CPUs,
    # containing the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    partdat = []
    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            if coord == "x":
                partdat.append(b.x(i))
            if coord == "px":
                partdat.append(b.px(i))
            if coord == "y":
                partdat.append(b.y(i))
            if coord == "py":
                partdat.append(b.py(i))
            if coord == "z":
                partdat.append(b.z(i))
            if coord == "dE":
                partdat.append(b.dE(i))

    # That is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)
    val_arr = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

    for i_cpu in range(1, size):
        # Again, that is just in case. Actually, the MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                # get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)
                if coord == "x":
                    partdat.append(x)
                if coord == "px":
                    partdat.append(px)
                if coord == "y":
                    partdat.append(y)
                if coord == "py":
                    partdat.append(py)
                if coord == "z":
                    partdat.append(z)
                if coord == "dE":
                    partdat.append(dE)
            elif (rank == i_cpu):
                # send the coordinate array
                x = b.x(i)
                px = b.px(i)
                y = b.y(i)
                py = b.py(i)
                z = b.z(i)
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    # The histogram is built and written only on the main rank,
    # where all the coordinates have been collected.
    if (rank != main_rank):
        return

    l = len(partdat)
    m = min(partdat)
    M = max(partdat)
    c = (M + m) / 2.0
    d = (M - m) * 1.1 / 2.0
    M = c + d
    m = c - d
    if Max > M:
        M = Max
    if Min < m:
        m = Min

    dx = (M - m) / steps
    grid = [m]
    prof = [0]
    for i in range(1, steps + 1):
        x = m + i * dx
        grid.append(x)
        prof.append(0)
    grid.append(M)
    prof.append(0)

    for n in range(l):
        i = int((partdat[n] - m) / dx)
        if i < 0:
            pass
        elif i > steps:
            pass
        else:
            frac = (partdat[n] - m) / dx % 1
            prof[i] = prof[i] + (1.0 - frac)
            prof[i + 1] = prof[i + 1] + frac

    sum = 0.0
    for i in range(steps + 1):
        sum = sum + prof[i]

    file_out = open(histogram, "w")
    file_out.write("Min = " + str(m) + " Max = " + str(M) + " steps = " + str(steps) + "\n")
    file_out.write("nParts = " + str(l) + " HistSum = " + str(sum) + "\n\n")
    for i in range(steps + 1):
        file_out.write(str(grid[i]) + " " + str(prof[i]) + "\n")
    file_out.close()
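# Usage sketch (not part of the original module): write a 200-bin longitudinal profile
# of the bunch to a text file; the file name is illustrative only.
def write_longitudinal_profile(bunch):
    profiles(bunch, "z", "z_profile.txt", steps=200)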
def bunch_orbit_to_pyorbit_nHarm(ringLength, nHarm, kineticEnergy, name_of_orbit_mpi_bunch_file, pyOrbitBunch=None):
    """
    Translates an ORBIT_MPI bunch to a pyORBIT bunch, incorporating the RF harmonic
    number, and returns it. The pyORBIT bunch needs the ring length (m) and the
    energy, mass, and charge of the synchronous particle, which ORBIT_MPI does not
    store. This information is therefore taken from pyOrbitBunch, or a proton is
    assumed by default.
    Lines in the bunch files:
    ORBIT_MPI: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV]
    pyORBIT:   x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
    """
    zfac = ringLength / (2 * math.pi * nHarm)

    if (pyOrbitBunch == None):
        pyOrbitBunch = Bunch()

    # Take the MPI Communicator from the bunch: it could be different
    # from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # We will operate on the file only on the CPU with rank = 0
    file_in = None
    if (rank == main_rank):
        file_in = open(name_of_orbit_mpi_bunch_file, "r")

    pyOrbitBunch.getSyncParticle().kinEnergy(kineticEnergy)

    # Here we assign ln_nonempty = 1 on each CPU if the line
    # read by the CPU with rank = 0 was non-empty.
    # Otherwise, ln_nonempty will be zero everywhere.
    ln = None
    ln_nonempty = 0
    if (rank == main_rank):
        ln_nonempty = 0
        ln = file_in.readline().strip()
        if (len(ln) > 0):
            ln_nonempty = 1
    ln_nonempty = orbit_mpi.MPI_Bcast(ln_nonempty, mpi_datatype.MPI_INT, main_rank, comm)

    val_arr = (0., 0., 0., 0., 0., 0.)
    n_count = 1

    while (ln_nonempty == 1):
        # The rank of the CPU that will get the next particle.
        # Loops through all CPUs.
        recv_rank = n_count % size
        if (rank == main_rank):
            res_arr = ln.strip().split()
            x = float(res_arr[0]) / 1000.
            xp = float(res_arr[1]) / 1000.
            y = float(res_arr[2]) / 1000.
            yp = float(res_arr[3]) / 1000.
            z = -float(res_arr[4]) * zfac
            dE = float(res_arr[5])
            val_arr = (x, xp, y, yp, z, dE)
            # Send the information if rank = 0 is not
            # going to keep this particle
            if (recv_rank != main_rank):
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, recv_rank, 111, comm)
            else:
                pyOrbitBunch.addParticle(val_arr[0], val_arr[1], val_arr[2],
                                         val_arr[3], val_arr[4], val_arr[5])
        if (rank == recv_rank and rank != main_rank):
            val_arr = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, main_rank, 111, comm)
            pyOrbitBunch.addParticle(val_arr[0], val_arr[1], val_arr[2],
                                     val_arr[3], val_arr[4], val_arr[5])
        # Let's again find out if we want to proceed
        if (rank == main_rank):
            ln_nonempty = 0
            ln = file_in.readline().strip()
            if (len(ln) > 0):
                ln_nonempty = 1
        ln_nonempty = orbit_mpi.MPI_Bcast(ln_nonempty, mpi_datatype.MPI_INT, main_rank, comm)
        n_count += 1

    if (rank == main_rank):
        file_in.close()

    return pyOrbitBunch
def addParticles(self):
    """
    Performs the injection.
    """
    #---- User can skip injection if self.nparts is zero
    if (self.nparts == 0):
        return
    random.seed(100)

    (xmin, xmax, ymin, ymax) = self.injectregion

    #---- adjusts number of particles injected according to varying pattern width
    if self.lDistFunc.name == "JohoLongitudinalPaint":
        self.lDistFunc.getCoordinates()
        self.npartsfloat = self.lDistFunc.frac_change * self.npartsfloat
        self.nparts = int(round(self.npartsfloat))

    rank = 0
    numprocs = 1
    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)

    nPartsGlobal = self.bunch.getSizeGlobal()
    if (self.nmaxmacroparticles > 0):
        if (nPartsGlobal >= self.nmaxmacroparticles):
            return

    x_rank0 = []
    xp_rank0 = []
    y_rank0 = []
    yp_rank0 = []
    z_rank0 = []
    dE_rank0 = []
    ninjected_rank0 = 0
    x_local = []
    xp_local = []
    y_local = []
    yp_local = []
    z_local = []
    dE_local = []
    ninjectedlocal = 0

    if (rank == 0):
        for i in range(int(self.nparts)):
            (x, px) = self.xDistFunc.getCoordinates()
            (y, py) = self.yDistFunc.getCoordinates()
            (z, dE) = self.lDistFunc.getCoordinates()
            if ((x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)):
                ninjectedlocal = ninjectedlocal + 1
                x_rank0.append(x)
                xp_rank0.append(px)
                y_rank0.append(y)
                yp_rank0.append(py)
                z_rank0.append(z)
                dE_rank0.append(dE)
            else:
                self.addLostParticle(self.bunch, self.lostbunch, x, px, y, py, z, dE)

    nPartsLostGlobal = self.lostbunch.getSizeGlobal()
    nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal

    ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT, 0, comm)
    x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
    dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)

    n_remainder = ninjected % numprocs
    n_inj_local = ninjected / numprocs

    #---- inject an equal number of particles on each CPU
    i_start = rank * n_inj_local
    i_stop = (rank + 1) * n_inj_local
    for i in range(i_start, i_stop):
        particleId = nPartsTotalGlobal + i
        self.addInjectedParticle(self.bunch, x_local[i], xp_local[i], y_local[i], yp_local[i],
                                 z_local[i], dE_local[i], particleId)

    #---- inject the remainder of the particles
    n_max_index = numprocs * n_inj_local
    nPartsGlobal = self.bunch.getSizeGlobal()
    nPartsLostGlobal = self.lostbunch.getSizeGlobal()
    nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal
    for i in range(n_remainder):
        i_cpu = random.randint(0, numprocs - 1)
        i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
        if (rank == i_cpu):
            particleId = nPartsTotalGlobal + i
            self.addInjectedParticle(self.bunch, x_local[i + n_max_index], xp_local[i + n_max_index],
                                     y_local[i + n_max_index], yp_local[i + n_max_index],
                                     z_local[i + n_max_index], dE_local[i + n_max_index], particleId)
            #---- here n_inj_local is just for information for debugging
            n_inj_local = n_inj_local + 1

    self.bunch.compress()
    self.lostbunch.compress()
fx = fy = fxy
LFS = HermiteGaussianLFmode(math.sqrt(power), 0, 0, wx, wy, fx, fy, la)
LFS.setLaserFieldOrientation(0., 0., 0., -1., 0., kz, 1., 0., 1. / kz, 0., 1., 0.)
tracker = RungeKuttaTracker(1000.0)
First = DensityMatrix(Stark, 10000., LFS)
fS = LSFieldSource(0., 0., 0., Bx, 0., 0.)
tracker.track(bunch_target, 0, time_step * n_step, time_step, fS, First)
bunch_target.dumpBunch("bunch_res" + str(count) + ".dat")

population = 0.
population2 = 0
for i in range(bunch_target.getSize()):
    val = (1 - bunch_target.partAttrValue("Amplitudes", i, 1))
    population += val
    population2 += val * val

mpi_size = orbit_mpi.MPI_Comm_size(mpi_comm.MPI_COMM_WORLD)
op = mpi_op.MPI_SUM
data_type = mpi_datatype.MPI_DOUBLE
population = orbit_mpi.MPI_Allreduce(population, data_type, op, mpi_comm.MPI_COMM_WORLD)
population2 = orbit_mpi.MPI_Allreduce(population2, data_type, op, mpi_comm.MPI_COMM_WORLD)
population = population / (mpi_size * N_part)

sigma_pop = 0.
if (N_part * mpi_size > 1):
    sigma_pop = math.sqrt((population2 - N_part * population * population) / (N_part * mpi_size * (N_part * mpi_size - 1)))

time_live = orbit_mpi.MPI_Wtime() - time_start
res = " %6.0f %4.1f %4.1f %4.1f %6.3f %6.3f " % (time_live, power / 1.0e+6, 1.0e+6 * wx, fxy * 100, population, sigma_pop)
if (rank == 0):
    print "W [MW]= %4.1f wx [um] = %4.1f dist[cm]= %4.1f Population: %7.3f +- %7.3f" % (power / 1.0e+6, 1.0e+6 * wx, fxy * 100, population, sigma_pop)
if (rank == 0):
    f_out = open(file_name, "a")
    f_out.write(str(count) + " " + res + "\n")
    f_out.close()
def bunch_pyorbit_to_orbit(ringLength, pyOrbitBunch, name_of_orbit_mpi_bunch_file):
    """
    Translates a pyORBIT bunch to an ORBIT_MPI bunch and dumps it into a file.
    The ring length should be defined in the input (in meters).
    The ORBIT_MPI file has lines: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV]
    pyORBIT:                      x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
    """
    pi2 = 2.0 * math.pi
    L = ringLength
    b = pyOrbitBunch

    # take the MPI Communicator from the bunch: it could be different from MPI_COMM_WORLD
    comm = pyOrbitBunch.getMPIComm()
    rank = orbit_mpi.MPI_Comm_rank(comm)
    size = orbit_mpi.MPI_Comm_size(comm)
    main_rank = 0

    # n_parts_arr - array of size equal to the number of CPUs,
    # containing the number of macroparticles on each CPU
    n_parts_arr = [0] * size
    n_parts_arr[rank] = b.getSize()
    n_parts_arr = orbit_mpi.MPI_Allreduce(n_parts_arr, mpi_datatype.MPI_INT, mpi_op.MPI_SUM, comm)

    file_out = None
    if (rank == main_rank):
        file_out = open(name_of_orbit_mpi_bunch_file, "w")

    if (rank == main_rank):
        for i in range(n_parts_arr[rank]):
            x = b.x(i) * 1000.
            px = b.px(i) * 1000.
            y = b.y(i) * 1000.
            py = b.py(i) * 1000.
            z = -(math.fmod(b.z(i) * pi2 / L, pi2))
            if (z > math.pi):
                z = z - 2 * math.pi
            if (z < -math.pi):
                z = z + 2 * math.pi
            dE = b.dE(i)
            file_out.write(str(x) + " " + str(px) + " " + str(y) + " " + str(py) + " " + str(z) + " " + str(dE) + "\n")

    # That is just in case. Actually, the MPI_Barrier command is not necessary.
    orbit_mpi.MPI_Barrier(comm)

    val_arr = (0., 0., 0., 0., 0., 0.)

    for i_cpu in range(1, size):
        # Again, that is just in case. Actually, the MPI_Barrier command is not necessary.
        orbit_mpi.MPI_Barrier(comm)
        for i in range(n_parts_arr[i_cpu]):
            if (rank == main_rank):
                # get the coordinate array
                (x, px, y, py, z, dE) = orbit_mpi.MPI_Recv(mpi_datatype.MPI_DOUBLE, i_cpu, 222, comm)
                file_out.write(str(x) + " " + str(px) + " " + str(y) + " " + str(py) + " " + str(z) + " " + str(dE) + "\n")
            elif (rank == i_cpu):
                # send the coordinate array
                x = b.x(i) * 1000.
                px = b.px(i) * 1000.
                y = b.y(i) * 1000.
                py = b.py(i) * 1000.
                z = -(math.fmod(b.z(i) * pi2 / L, pi2))
                if (z > math.pi):
                    z = z - 2 * math.pi
                if (z < -math.pi):
                    z = z + 2 * math.pi
                dE = b.dE(i)
                val_arr = (x, px, y, py, z, dE)
                orbit_mpi.MPI_Send(val_arr, mpi_datatype.MPI_DOUBLE, main_rank, 222, comm)

    if (rank == main_rank):
        file_out.close()
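# Usage sketch (not part of the original module): dump a pyORBIT bunch in the plain
# ORBIT_MPI text format (no harmonic number). The circumference and file name are
# illustrative only.
def dump_bunch_as_orbit_mpi(bunch):
    ring_length = 216.72   # m, illustrative
    bunch_pyorbit_to_orbit(ring_length, bunch, 'bunch_for_orbit.dat')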