Example #1
    def setGaussDistributedDisplacementParameter(self,
                                                 key,
                                                 value,
                                                 cut_off_level=3.0,
                                                 comm=mpi_comm.MPI_COMM_WORLD):
        """
		Sets the random generated error value for a particular coordinate for all nodes.
		The cooridinate is defined by key parameter.
		"""
        value = abs(value)
        if (key in self.param_dict):
            self.param_dict[key] = value
        else:
            msg = "Class CoordinateDisplacementNodesModification - key-value problem"
            msg = msg + os.linesep
            msg = msg + "Method setGaussDistributedDisplacementParameter:"
            msg = msg + os.linesep
            msg = msg + "You are trying to set value for key=" + key
            msg = msg + os.linesep
            msg = msg + "Keys could be only = (dx, dxp, dy, dyp, dz, dE)"
            msg = msg + os.linesep
            orbitFinalize(msg)
            return
        #---------------------------------------------------------------------
        for errCntrl in self.error_controllers:
            value_tmp = random.gauss(0., value)
            while (abs(value_tmp) > abs(value) * cut_off_level):
                value_tmp = random.gauss(0., value)
            main_rank = 0
            value_tmp = orbit_mpi.MPI_Bcast(value_tmp, mpi_datatype.MPI_DOUBLE,
                                            main_rank, comm)
            errCntrl.setDisplacementParameter(key, value_tmp)
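The idiom above (resample until the value falls inside the cut-off, then broadcast rank 0's result) keeps the randomly generated lattice errors identical on every MPI process. A minimal standalone sketch of that pattern, using only the orbit_mpi calls already shown in these examples; the helper name and the import form are assumptions:

import random

import orbit_mpi
from orbit_mpi import mpi_comm, mpi_datatype

def gauss_with_cutoff(sigma, cut_off_level=3.0, comm=mpi_comm.MPI_COMM_WORLD):
    # Resample until the value lands inside +-cut_off_level*sigma.
    value = random.gauss(0., sigma)
    while (abs(value) > abs(sigma) * cut_off_level):
        value = random.gauss(0., sigma)
    # Every rank returns rank 0's sample, so all MPI processes
    # apply exactly the same error.
    return orbit_mpi.MPI_Bcast(value, mpi_datatype.MPI_DOUBLE, 0, comm)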
Example #2
    def getBunch(self,
                 nParticles=0,
                 distributorClass=WaterBagDist3D,
                 cut_off=-1.):
        """
		Returns the pyORBIT bunch with particular number of particles.
		"""
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
        rank = orbit_mpi.MPI_Comm_rank(comm)
        size = orbit_mpi.MPI_Comm_size(comm)
        data_type = mpi_datatype.MPI_DOUBLE
        main_rank = 0
        bunch = Bunch()
        self.bunch.copyEmptyBunchTo(bunch)
        macrosize = (self.beam_current * 1.0e-3 / self.bunch_frequency)
        macrosize /= (math.fabs(bunch.charge()) * self.si_e_charge)
        distributor = distributorClass(self.twiss[0], self.twiss[1],
                                       self.twiss[2], cut_off)
        bunch.getSyncParticle().time(0.)
        for i in range(nParticles):
            (x, xp, y, yp, z, dE) = distributor.getCoordinates()
            (x, xp, y, yp, z, dE) = orbit_mpi.MPI_Bcast(
                (x, xp, y, yp, z, dE), data_type, main_rank, comm)
            if (i % size == rank):
                bunch.addParticle(x, xp, y, yp, z, dE)
        nParticlesGlobal = bunch.getSizeGlobal()
        bunch.macroSize(macrosize / nParticlesGlobal)
        return bunch
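Note the ownership rule if (i % size == rank): every rank draws and broadcasts the same coordinates, but each particle is stored on exactly one rank, so the bunch ends up distributed round-robin across the MPI processes with no further communication. A minimal sketch of the same split, with hypothetical item data:

import orbit_mpi
from orbit_mpi import mpi_comm

comm = mpi_comm.MPI_COMM_WORLD
rank = orbit_mpi.MPI_Comm_rank(comm)
size = orbit_mpi.MPI_Comm_size(comm)

# Out of 100 globally numbered items, this rank keeps every
# size-th one, starting at its own rank index.
local_items = [i for i in range(100) if i % size == rank]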
Example #3
	def addAxisField(cls,fl_name,dir_location = ""):
		"""
		This method adds the axis RF field for the RF gap node to the store.
		The dir_location string will be prepended to fl_name to form
		the full file name.
		Returns the axis RF field function.
		"""
		if(fl_name in cls.static_axis_field_dict):
			return cls.static_axis_field_dict[fl_name]
		comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
		data_type = mpi_datatype.MPI_DOUBLE
		rank = orbit_mpi.MPI_Comm_rank(comm)
		main_rank = 0
		x_arr = []
		y_arr = []
		if(rank == 0):
			fl_in = open(dir_location + fl_name,"r")
			lns = fl_in.readlines()
			fl_in.close()
			for ln in lns:
				res_arr = ln.split()
				if(len(res_arr) == 2):
					x = float(res_arr[0])
					y = float(res_arr[1])
					x_arr.append(x)		
					y_arr.append(y)	
		x_arr = orbit_mpi.MPI_Bcast(x_arr,data_type,main_rank,comm)
		y_arr = orbit_mpi.MPI_Bcast(y_arr,data_type,main_rank,comm)
		function = Function()
		for ind in range(len(x_arr)):
			function.add(x_arr[ind],y_arr[ind])
		#---- Setting a constant step (if the function allows it)
		#---- speeds up function evaluation later.
		function.setConstStep(1)
		cls.static_axis_field_dict[fl_name] = function
		return function
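The read-on-rank-0-then-broadcast pattern above keeps every process but one away from the file system. A minimal sketch of just that pattern, assuming (as the example above demonstrates) that orbit_mpi.MPI_Bcast accepts Python lists of floats; the function name and two-column file layout are hypothetical:

import orbit_mpi
from orbit_mpi import mpi_comm, mpi_datatype

def read_two_columns(file_name, comm=mpi_comm.MPI_COMM_WORLD):
	# Only rank 0 touches the file system.
	rank = orbit_mpi.MPI_Comm_rank(comm)
	x_arr = []
	y_arr = []
	if(rank == 0):
		for ln in open(file_name, "r"):
			res_arr = ln.split()
			if(len(res_arr) == 2):
				x_arr.append(float(res_arr[0]))
				y_arr.append(float(res_arr[1]))
	# Every other rank receives a copy of the parsed arrays.
	x_arr = orbit_mpi.MPI_Bcast(x_arr, mpi_datatype.MPI_DOUBLE, 0, comm)
	y_arr = orbit_mpi.MPI_Bcast(y_arr, mpi_datatype.MPI_DOUBLE, 0, comm)
	return (x_arr, y_arr)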
Example #4
    def setGaussDistributedRelativeFieldError(self,
                                              relative_error,
                                              cut_off_level=3.0,
                                              comm=mpi_comm.MPI_COMM_WORLD):
        """
		Sets the random generated field error for all bends. The same value for all registered bends.
		"""
        rel_err = random.gauss(0., relative_error)
        while (abs(rel_err) > abs(relative_error) * cut_off_level):
            rel_err = random.gauss(0., relative_error)
        main_rank = 0
        rel_err = orbit_mpi.MPI_Bcast(rel_err, mpi_datatype.MPI_DOUBLE,
                                      main_rank, comm)
        self.relative_field_change = rel_err
        self.updateErrorParameters()
Example #5
    def setGaussDistributedAngle(self,
                                 angle,
                                 cut_off_level=3.0,
                                 comm=mpi_comm.MPI_COMM_WORLD):
        """
		Sets the random generated error angle for all nodes.
		"""
        for errCntrl in self.error_controllers:
            angle_tmp = random.gauss(0., angle)
            while (abs(angle_tmp) > abs(angle) * cut_off_level):
                angle_tmp = random.gauss(0., angle)
            main_rank = 0
            angle_tmp = orbit_mpi.MPI_Bcast(angle_tmp, mpi_datatype.MPI_DOUBLE,
                                            main_rank, comm)
            errCntrl.setRotationAngle(angle_tmp)
Example #6
    def setGaussDistributedRealtiveErrors(self,
                                          relative_error,
                                          cut_off_level=3.0,
                                          comm=mpi_comm.MPI_COMM_WORLD):
        """
		Sets the random generated error field for all quads.
		"""
        for [quad, field_init] in self.quad_and_field_arr:
            rel_err = random.gauss(0., relative_error)
            while (abs(rel_err) > abs(relative_error) * cut_off_level):
                rel_err = random.gauss(0., relative_error)
            main_rank = 0
            rel_err = orbit_mpi.MPI_Bcast(rel_err, mpi_datatype.MPI_DOUBLE,
                                          main_rank, comm)
            field = field_init * (1.0 + rel_err)
            quad.setParam("dB/dr", field)
Example #7
    def setGaussDistributedShiftLength(self,
                                       shift_length,
                                       cut_off_level=3.0,
                                       comm=mpi_comm.MPI_COMM_WORLD):
        """
		Sets the random generated error shift_length for all nodes.
		"""
        for errCntrl in self.error_controllers:
            shift_length_tmp = random.gauss(0., shift_length)
            while (abs(shift_length_tmp) > abs(shift_length) * cut_off_level):
                shift_length_tmp = random.gauss(0., shift_length)
            main_rank = 0
            shift_length_tmp = orbit_mpi.MPI_Bcast(shift_length_tmp,
                                                   mpi_datatype.MPI_DOUBLE,
                                                   main_rank, comm)
            errCntrl.setShiftLength(shift_length_tmp)
Example #8
	def getBunch(self, nParticles, twissX, twissY, twissZ, cut_off = -1.):
		"""
		Returns the pyORBIT bunch with particular number of particles.
		"""
		(x0,xp0,y0,yp0,z0,dE0) = self.init_coords
		comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
		rank = orbit_mpi.MPI_Comm_rank(comm)
		size = orbit_mpi.MPI_Comm_size(comm)		
		data_type = mpi_datatype.MPI_DOUBLE		
		main_rank = 0		
		bunch = Bunch()
		self.bunch.copyEmptyBunchTo(bunch)		
		macrosize = (self.beam_current*1.0e-3/self.bunch_frequency)
		macrosize /= (math.fabs(bunch.charge())*self.si_e_charge)
		distributor = GaussDist3D(twissX,twissY,twissZ, cut_off)
		bunch.getSyncParticle().time(0.)	
		for i in range(nParticles):
			(x,xp,y,yp,z,dE) = distributor.getCoordinates()
			(x,xp,y,yp,z,dE) = orbit_mpi.MPI_Bcast((x,xp,y,yp,z,dE),data_type,main_rank,comm)
			if(i%size == rank):
				bunch.addParticle(x+x0,xp+xp0,y+y0,yp+yp0,z+z0,dE+dE0)
		nParticlesGlobal = bunch.getSizeGlobal()
		bunch.macroSize(macrosize/nParticlesGlobal)
		return bunch
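The distributor used above can also be exercised on its own. A minimal sketch, assuming TwissContainer and GaussDist3D come from pyORBIT's bunch generators module (the import path is an assumption, and the Twiss values are made up):

from orbit.bunch_generators import TwissContainer, GaussDist3D

# TwissContainer(alpha, beta, emittance), as in Example #10 below.
twissX = TwissContainer(0.0, 1.0, 1.0e-6)
twissY = TwissContainer(0.0, 1.0, 1.0e-6)
twissZ = TwissContainer(0.0, 100.0, 1.0e-5)

# cut_off is passed through as in the examples above (-1. is the default there).
distributor = GaussDist3D(twissX, twissY, twissZ, 3.0)
(x, xp, y, yp, z, dE) = distributor.getCoordinates()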
Example #9
	def addParticles(self):
		(xmin,xmax,ymin,ymax) = self.injectregion
	
		#adjusts number of particles injected according to varying pattern width
		if self.lDistFunc.name == "JohoLongitudinalPaint":
			self.lDistFunc.getCoordinates()
			self.npartsfloat = self.lDistFunc.frac_change*self.npartsfloat
			self.nparts = int(round(self.npartsfloat))
		
		rank = 0
		numprocs = 1
		
		mpi_init = orbit_mpi.MPI_Initialized()
		comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
		
		if(mpi_init):
			rank = orbit_mpi.MPI_Comm_rank(comm)
			numprocs = orbit_mpi.MPI_Comm_size(comm)
		
		nPartsGlobal = self.bunch.getSizeGlobal()
		
		if(self.nmaxmacroparticles > 0):
			if(nPartsGlobal >= self.nmaxmacroparticles):
				return
				
		#if((nTurnsDone % injectTurnInterval) != 0):
		#	return
		
		x_rank0 = []
		xp_rank0 = []
		y_rank0 = []
		yp_rank0 = []
		z_rank0 = []
		dE_rank0 = []
		ninjected_rank0 = 0
		x_local = []
		xp_local = []
		y_local = []
		yp_local = []
		z_local = []
		dE_local = []
		ninjectedlocal  = 0

		if(rank == 0):
			for i in xrange(int(self.nparts)):
				(x,px) = self.xDistFunc.getCoordinates()
				(y,py) = self.yDistFunc.getCoordinates()
				(z,dE) = self.lDistFunc.getCoordinates()

				if((x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)):
					ninjectedlocal = ninjectedlocal + 1
					x_rank0.append(x)
					xp_rank0.append(px)
					y_rank0.append(y)
					yp_rank0.append(py)
					z_rank0.append(z)
					dE_rank0.append(dE)
					#self.bunch.addParticle(x,px,y,py,z,dE)
				else:
					self.lostbunch.addParticle(x,px,y,py,z,dE)
					#self.bunch.compress()
		
		ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT, 0, comm)
		x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)

		n_remainder = ninjected % numprocs
		n_inj_local = ninjected // numprocs
	
		#inject an equal number of particles on each CPU
	
		i_start = rank * n_inj_local
		i_stop = (rank+1) * n_inj_local 
		for i in xrange(i_start, i_stop):
			self.bunch.addParticle(x_local[i],xp_local[i],y_local[i],yp_local[i],z_local[i],dE_local[i])
				
		n_max_index = numprocs * n_inj_local

		for i in xrange(n_remainder):
			i_cpu = int(numprocs * random.random())
			i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
			if(rank == i_cpu):
				self.bunch.addParticle(x_local[i + n_max_index], xp_local[i + n_max_index], y_local[i + n_max_index],
									   yp_local[i + n_max_index], z_local[i + n_max_index], dE_local[i + n_max_index])
				n_inj_local = n_inj_local + 1
					
		#nInjectedMacros += n_inj_local;
		
		self.bunch.compress()
		self.lostbunch.compress()
Example #10
(alphaY, betaY, emittY) = (2.92, 0.281, 3.74 * 1.0e-6)
(alphaZ, betaZ, emittZ) = (0.0, 117.0, 0.0166 * 1.0e-6)

#---- we artificially increase the emittances to see aperture effects
emittX *= 5.
emittY *= 10.

twissX = TwissContainer(alphaX, betaX, emittX)
twissY = TwissContainer(alphaY, betaY, emittY)
twissZ = TwissContainer(alphaZ, betaZ, emittZ)

distributor = WaterBagDist3D(twissX, twissY, twissZ)

for ind in range(N_particles):
    (x, xp, y, yp, z, dE) = distributor.getCoordinates()
    (x, xp, y, yp, z, dE) = orbit_mpi.MPI_Bcast((x, xp, y, yp, z, dE),
                                                data_type, main_rank, comm)
    if (ind % size == rank):
        bunch.addParticle(x, xp, y, yp, z, dE)

nParticlesGlobal = bunch.getSizeGlobal()
if (rank == 0):
    print "total number of particles =", nParticlesGlobal
bunch.macroSize(macrosize)

#set up design
accLattice.trackDesignBunch(bunch)

paramsDict = {}
lost_parts_bunch = Bunch()
paramsDict["lostbunch"] = lost_parts_bunch
Example #11
    def addParticles(self):
        """
		Performs injections.
		"""
        #---- User can skip injection if self.nparts is zero
        if (self.nparts == 0): return
        random.seed(100)

        (xmin, xmax, ymin, ymax) = self.injectregion

        #adjusts number of particles injected according to varying pattern width
        if self.lDistFunc.name == "JohoLongitudinalPaint":
            self.lDistFunc.getCoordinates()
            self.npartsfloat = self.lDistFunc.frac_change * self.npartsfloat
            self.nparts = int(round(self.npartsfloat))

        rank = 0
        numprocs = 1

        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)
            numprocs = orbit_mpi.MPI_Comm_size(comm)

        nPartsGlobal = self.bunch.getSizeGlobal()

        if (self.nmaxmacroparticles > 0):
            if (nPartsGlobal >= self.nmaxmacroparticles):
                return

        x_rank0 = []
        xp_rank0 = []
        y_rank0 = []
        yp_rank0 = []
        z_rank0 = []
        dE_rank0 = []
        ninjected_rank0 = 0
        x_local = []
        xp_local = []
        y_local = []
        yp_local = []
        z_local = []
        dE_local = []
        ninjectedlocal = 0

        if (rank == 0):
            for i in range(int(self.nparts)):
                (x, px) = self.xDistFunc.getCoordinates()
                (y, py) = self.yDistFunc.getCoordinates()
                (z, dE) = self.lDistFunc.getCoordinates()

                if ((x > xmin) & (x < xmax) & (y > ymin) & (y < ymax)):
                    ninjectedlocal = ninjectedlocal + 1
                    x_rank0.append(x)
                    xp_rank0.append(px)
                    y_rank0.append(y)
                    yp_rank0.append(py)
                    z_rank0.append(z)
                    dE_rank0.append(dE)
                else:
                    self.addLostParticle(self.bunch, self.lostbunch, x, px, y,
                                         py, z, dE)

        nPartsLostGlobal = self.lostbunch.getSizeGlobal()
        nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal

        ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT,
                                        0, comm)
        x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)
        y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)
        z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)

        n_remainder = ninjected % numprocs
        n_inj_local = ninjected // numprocs

        #---- inject an equal number of particles on each CPU
        i_start = rank * n_inj_local
        i_stop = (rank + 1) * n_inj_local
        for i in range(i_start, i_stop):
            particleId = nPartsTotalGlobal + i
            self.addInjectedParticle(self.bunch, x_local[i], xp_local[i],
                                     y_local[i], yp_local[i], z_local[i],
                                     dE_local[i], particleId)

        #---- inject the remainder of the particles
        n_max_index = numprocs * n_inj_local

        nPartsGlobal = self.bunch.getSizeGlobal()
        nPartsLostGlobal = self.lostbunch.getSizeGlobal()
        nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal

        for i in range(n_remainder):
            i_cpu = random.randint(0, numprocs - 1)
            i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
            if (rank == i_cpu):
                particleId = nPartsTotalGlobal + i
                self.addInjectedParticle(self.bunch, x_local[i + n_max_index],
                                         xp_local[i + n_max_index],
                                         y_local[i + n_max_index],
                                         yp_local[i + n_max_index],
                                         z_local[i + n_max_index],
                                         dE_local[i + n_max_index], particleId)
                #---- here n_inj_local is just for information for debugging
                n_inj_local = n_inj_local + 1

        self.bunch.compress()
        self.lostbunch.compress()
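The index arithmetic in both addParticles variants is worth spelling out: each rank takes a contiguous slice of n_inj_local = ninjected // numprocs particles from the broadcast arrays, and the ninjected % numprocs leftovers sit at the tail, starting at numprocs * n_inj_local, to be handed one at a time to a randomly chosen (but globally agreed, via MPI_Bcast) rank. A sketch of that bookkeeping with hypothetical names:

def split_indices(ninjected, numprocs, rank):
    # Equal share for every rank...
    n_inj_local = ninjected // numprocs
    i_start = rank * n_inj_local
    i_stop = (rank + 1) * n_inj_local
    # ...and the remainder lives at the tail of the broadcast arrays,
    # starting at n_max_index = numprocs * n_inj_local.
    n_remainder = ninjected % numprocs
    return (i_start, i_stop, n_remainder)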
Example #12
def bunch_orbit_to_pyorbit_nHarm(ringLength, nHarm, kineticEnergy, \
	name_of_orbit_mpi_bunch_file, pyOrbitBunch = None):
	"""
	Translates ORBIT_MPI bunch to pyORBIT bunch, incorporating
	RF harmonic number, and returns it.
	PyORBIT bunch needs the ring length (m) and energy, mass, and
	charge of the synchronous particle, but ORBIT_MPI does not 
	have it. So, this information is specified in pyOrbitBunch or
	it will be proton by default.
	Lines in bunch files:
	ORBIT_MPI: x[mm] xp[mrad] y[mm] yp[mrad] phi[rad] dE[GeV].
	pyORBIT:   x[m]  xp[rad]  y[m]  yp[rad]  z[m]     dE[GeV]
	"""
	zfac = ringLength / (2 * math.pi * nHarm)
	if(pyOrbitBunch is None):  pyOrbitBunch = Bunch()
	# Take the MPI Communicator from bunch: it could be different
	# from MPI_COMM_WORLD
	comm = pyOrbitBunch.getMPIComm()
	rank = orbit_mpi.MPI_Comm_rank(comm)
	size = orbit_mpi.MPI_Comm_size(comm)	
	main_rank = 0
	# We will operate on file only at the CPU with rank = 0
	file_in = None
	if(rank == main_rank):
		file_in = open(name_of_orbit_mpi_bunch_file,"r")

	pyOrbitBunch.getSyncParticle().kinEnergy(kineticEnergy)

	# Set ln_nonempty = 1 on every CPU if the line read by the
	# rank-0 CPU was non-empty; otherwise ln_nonempty stays zero
	# everywhere.
	ln = None
	ln_nonempty = 0

	if(rank == main_rank):
		ln_nonempty = 0
		ln = file_in.readline().strip()
		if(len(ln) > 0):
			ln_nonempty = 1
	ln_nonempty = orbit_mpi.MPI_Bcast(ln_nonempty, \
		mpi_datatype.MPI_INT,main_rank,comm)

	val_arr = (0., 0., 0., 0., 0., 0.)
	
	n_count = 1

	while(ln_nonempty == 1):
		# The rank of CPU that will get the next particle.
		# Loops through all CPUs. 
		recv_rank = n_count % size	
		if(rank == main_rank):
			res_arr = ln.strip().split()
			x  =  float(res_arr[0]) / 1000.
			xp =  float(res_arr[1]) / 1000.
			y  =  float(res_arr[2]) / 1000.
			yp =  float(res_arr[3]) / 1000.
			z  = -float(res_arr[4]) * zfac
			dE =  float(res_arr[5])
			val_arr = (x, xp, y, yp, z, dE)
			# Send the information if rank = 0 is not
			# going to keep this particle
			if(recv_rank != main_rank):
				orbit_mpi.MPI_Send(val_arr, \
					mpi_datatype.MPI_DOUBLE, \
					recv_rank, 111, comm)
			else:
				pyOrbitBunch.addParticle(val_arr[0], \
					val_arr[1], val_arr[2], \
					val_arr[3], val_arr[4], \
					val_arr[5])
		if(rank == recv_rank and rank != main_rank):
			val_arr = orbit_mpi.MPI_Recv(\
				mpi_datatype.MPI_DOUBLE, \
				main_rank, 111, comm)
			pyOrbitBunch.addParticle(val_arr[0], \
				val_arr[1], val_arr[2], \
				val_arr[3], val_arr[4], \
				val_arr[5])
		# Let's again find out if we want to proceed 
		if(rank == main_rank):
			ln_nonempty = 0
			ln = file_in.readline().strip()
			if(len(ln) > 0):
				ln_nonempty = 1
		ln_nonempty = orbit_mpi.MPI_Bcast(ln_nonempty, \
			mpi_datatype.MPI_INT, main_rank, comm)				
		n_count += 1		

	if(rank == main_rank): file_in.close()
	return pyOrbitBunch
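A hedged usage sketch for the translator above; the ring length, harmonic number, kinetic energy, and file name are made-up values for illustration:

# Hypothetical parameters: a 248 m ring, harmonic number 1,
# 1.0 GeV kinetic energy, default proton bunch.
bunch = bunch_orbit_to_pyorbit_nHarm(248.0, 1, 1.0, "orbit_mpi_bunch.dat")
print "pyORBIT bunch size =", bunch.getSizeGlobal()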
Example #13
	def readFile(self,file_name):
		self.data_arr = []
		self.Zmin = 0.
		self.Zmax = 0.
		self.Rmin = 0.
		self.Rmax = 0.
		self.zSteps = 0
		self.rSteps = 0			
		rank = orbit_mpi.MPI_Comm_rank(mpi_comm.MPI_COMM_WORLD)
		main_rank = 0
		if(rank == 0):
			fl_in = open(file_name,"r")
			start_data = 0
			for ln in fl_in:
				res = ln.split()
				if(start_data == 0):
					if(ln.find("(Zmin,Rmin)") >= 0):
						zr_min_max = res[2][1:len(res[2])-1].split(",")
						self.Zmin = float(zr_min_max[0])
						self.Rmin = float(zr_min_max[1])
					if(ln.find("(Zmax,Rmax)") >= 0):
						zr_min_max = res[2][1:len(res[2])-1].split(",")
						self.Zmax = float(zr_min_max[0])
						self.Rmax = float(zr_min_max[1])	
					if(len(res) > 4 and res[0] == "Z" and res[1] == "and" and res[2] == "R"):
						self.zSteps = int(res[4])
						self.rSteps = int(res[5])
					if(len(res) == 6 and res[0] == '(cm)' and res[1] == '(cm)' and res[2] == '(MV/m)'):
						start_data = 1
				else:
					if(len(res) == 6):
						arr = []
						for st in res:
							arr.append(float(st))
						self.data_arr.append(arr)
					else:
						break
			fl_in.close()
		#------end of rank 0 actions
		n = len(self.data_arr)
		n = orbit_mpi.MPI_Bcast(n,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.zSteps = orbit_mpi.MPI_Bcast(self.zSteps,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.rSteps = orbit_mpi.MPI_Bcast(self.rSteps,mpi_datatype.MPI_INT,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.Zmin = orbit_mpi.MPI_Bcast(self.Zmin,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.Zmax = orbit_mpi.MPI_Bcast(self.Zmax,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.Rmin = orbit_mpi.MPI_Bcast(self.Rmin,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
		self.Rmax = orbit_mpi.MPI_Bcast(self.Rmax,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
		if((self.zSteps+1)*(self.rSteps+1) != n):
			if(rank == 0):
				print "====================================================="
				print "SuperFish_3D_RF_FiledReader:"
				print "The file=",file_name," does not have a correct format!"
				print "Stop."
			sys.exit(1)
		for i in range(n):
			arr = [0.]*6
			if(rank == 0):
				arr = self.data_arr[i]
			arr = orbit_mpi.MPI_Bcast(arr,mpi_datatype.MPI_DOUBLE,main_rank,mpi_comm.MPI_COMM_WORLD)
			if(rank != 0):
				self.data_arr.append(arr)
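A usage sketch for the reader above; the class name is taken from the method's own error message, and both the constructor signature and the file name are assumptions:

reader = SuperFish_3D_RF_FiledReader()     # assumption: no-argument constructor
reader.readFile("pillbox_cavity_sf7.out")  # hypothetical SuperFish output file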