Example #1
    def writeStatLats(self, s, bunch, lattlength=0):

        self.bunchtwissanalysis.analyzeBunch(bunch)
        emitx = self.bunchtwissanalysis.getEmittance(0)
        betax = self.bunchtwissanalysis.getBeta(0)
        alphax = self.bunchtwissanalysis.getAlpha(0)
        betay = self.bunchtwissanalysis.getBeta(1)
        alphay = self.bunchtwissanalysis.getAlpha(1)
        emity = self.bunchtwissanalysis.getEmittance(1)
        dispersionx = self.bunchtwissanalysis.getDispersion(0)
        ddispersionx = self.bunchtwissanalysis.getDispersionDerivative(0)
        #dispersiony = self.bunchtwissanalysis.getDispersion(1, bunch)
        #ddispersiony = self.bunchtwissanalysis.getDispersionDerivative(1, bunch)

        sp = bunch.getSyncParticle()
        time = sp.time()

        if lattlength > 0:
            # express the elapsed time as a number of turns around the ring
            time = sp.time() / (lattlength / (sp.beta() * speed_of_light))

        # if mpi operations are enabled, this section of code will
        # determine the rank of the present node
        rank = 0  # default is primary node
        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)

        # only the primary node needs to output the calculated information
        if (rank == 0):
            self.file_out.write(
                str(s) + "\t" + str(time) + "\t" + str(emitx) + "\t" +
                str(emity) + "\t" + str(betax) + "\t" + str(betay) + "\t" +
                str(alphax) + "\t" + str(alphay) + "\t" + str(dispersionx) +
                "\t" + str(ddispersionx) + "\n")
Example #2
    def writeMoments(self, s, bunch, lattlength=0):

        sp = bunch.getSyncParticle()
        time = sp.time()

        if lattlength > 0:
            time = sp.time() / (lattlength / (sp.beta() * speed_of_light))

        self.bunchtwissanalysis.computeBunchMoments(bunch, self.order,
                                                    self.dispterm,
                                                    self.emitnormterm)

        # if mpi operations are enabled, this section of code will
        # determine the rank of the present node
        rank = 0  # default is primary node
        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)

        # only the primary node needs to output the calculated information
        if (rank == 0):
            self.file_out.write(str(s) + "\t" + str(time) + "\t")
            for i in range(0, self.order + 1):
                for j in range(0, i + 1):
                    self.file_out.write(
                        str(self.bunchtwissanalysis.getBunchMoment(i - j, j)) +
                        "\t")
            self.file_out.write("\n")
Example #3
def LinearRestoringForce(bunch, force):
    """Apply a linear longitudinal kick, dE -> dE + z*force, to every macroparticle."""

    rank = 0
    numprocs = 1

    mpi_init = orbit_mpi.MPI_Initialized()
    comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

    if (mpi_init):
        rank = orbit_mpi.MPI_Comm_rank(comm)
        numprocs = orbit_mpi.MPI_Comm_size(comm)

    # Gather the per-rank macroparticle counts across all processes.
    # (nparts_arr is collected here but not used by the kick below.)
    nparts_arr_local = [0] * numprocs
    nparts_arr_local[rank] = bunch.getSize()
    data_type = mpi_datatype.MPI_INT
    op = mpi_op.MPI_SUM

    nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op, comm)

    # Apply the kick: each particle's energy offset changes in proportion
    # to its longitudinal position z.
    for i in range(bunch.getSize()):
        en = bunch.dE(i) + bunch.z(i) * force
        bunch.dE(i, en)

    return
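
A standalone illustration of the kick applied above: each call adds z*force to a particle's energy offset, so repeated kicks interleaved with some longitudinal drift produce an oscillation. The drift step and the numbers here are assumptions for the demo, not pyORBIT code:

# Sketch of the dE -> dE + z*force kick on plain floats (no MPI, no Bunch).
z, dE = 0.1, 0.0
force = -0.05        # assumed magnitude; a negative sign makes the kick restoring
drift_coeff = 1.0    # hypothetical stand-in for the z <- z + coeff*dE drift
for turn in range(5):
    dE += z * force           # the kick LinearRestoringForce applies
    z += drift_coeff * dE     # assumed drift between kicks, for illustration
    print(turn, round(z, 4), round(dE, 4))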
Example #4
    def analyzeSignal(self, bunch):

        self.bunchtwissanalysis.analyzeBunch(bunch)

        # if mpi operations are enabled, this section of code will
        # determine the rank of the present node
        rank = 0  # default is primary node
        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)

        # only the primary node needs to output the calculated information
        if (rank == 0):
            self.xAvg = self.bunchtwissanalysis.getAverage(0)
            self.xpAvg = self.bunchtwissanalysis.getAverage(1)
            self.yAvg = self.bunchtwissanalysis.getAverage(2)
            self.ypAvg = self.bunchtwissanalysis.getAverage(3)
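
Note the indexing differs from Example #1: the Twiss getters there take a plane index, while getAverage here takes a coordinate index. A reference sketch of the convention implied by the assignments above (a reading aid, not a statement of the full API):

# Index conventions implied by Examples #1 and #4.
PLANE_INDEX = {"x": 0, "y": 1}                    # getBeta, getAlpha, getEmittance
COORD_INDEX = {"x": 0, "xp": 1, "y": 2, "yp": 3}  # getAverage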
Example #5
    def addParticleIdNumbers(b, fixedidnumber=-1, part_ind=0):

        rank = 0
        numprocs = 1

        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)
            numprocs = orbit_mpi.MPI_Comm_size(comm)

        # per-rank particle counts, summed across processes below
        nparts_arr_local = [0] * numprocs

        nparts_arr_local[rank] = b.getSize()
        data_type = mpi_datatype.MPI_INT
        op = mpi_op.MPI_SUM

        nparts_arr = orbit_mpi.MPI_Allreduce(nparts_arr_local, data_type, op,
                                             comm)

        if (b.hasPartAttr("ParticleIdNumber") == 0):
            b.addPartAttr("ParticleIdNumber")

        if (fixedidnumber >= 0):
            for i in range(part_ind, b.getSize()):
                b.partAttrValue("ParticleIdNumber", i, 0, fixedidnumber)

        else:
            # starting ID = the particle count summed over all lower ranks
            istart = 0
            for i in range(rank):
                istart = istart + nparts_arr[i]

            for i in range(b.getSize()):
                idnumber = istart + i
                b.partAttrValue("ParticleIdNumber", i, 0, idnumber)
Example #6
	def addParticles(self):
		(xmin,xmax,ymin,ymax) = self.injectregion
	
		#adjusts number of particles injected according to varying pattern width
		if self.lDistFunc.name == "JohoLongitudinalPaint":
			self.lDistFunc.getCoordinates()
			self.npartsfloat = self.lDistFunc.frac_change*self.npartsfloat
			self.nparts = int(round(self.npartsfloat))
		
		rank = 0
		numprocs = 1
		
		mpi_init = orbit_mpi.MPI_Initialized()
		comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
		
		if(mpi_init):
			rank = orbit_mpi.MPI_Comm_rank(comm)
			numprocs = orbit_mpi.MPI_Comm_size(comm)
		
		nPartsGlobal = self.bunch.getSizeGlobal()
		
		if(self.nmaxmacroparticles > 0):
			if(nPartsGlobal >= self.nmaxmacroparticles):
				return

		#if((nTurnsDone % injectTurnInterval) != 0):
		#	return
		
		x_rank0 = []
		xp_rank0 = []
		y_rank0 = []
		yp_rank0 = []
		z_rank0 = []
		dE_rank0 = []
		ninjected_rank0 = 0
		x_local = []
		xp_local = []
		y_local = []
		yp_local = []
		z_local = []
		dE_local = []
		ninjectedlocal  = 0

		if(rank == 0):
			for i in range(int(self.nparts)):
				(x,px) = self.xDistFunc.getCoordinates()
				(y,py) = self.yDistFunc.getCoordinates()
				(z,dE) = self.lDistFunc.getCoordinates()

				if((x > xmin) and (x < xmax) and (y > ymin) and (y < ymax)):
					ninjectedlocal = ninjectedlocal + 1
					x_rank0.append(x)
					xp_rank0.append(px)
					y_rank0.append(y)
					yp_rank0.append(py)
					z_rank0.append(z)
					dE_rank0.append(dE)
					#self.bunch.addParticle(x,px,y,py,z,dE)
				else:
					self.lostbunch.addParticle(x,px,y,py,z,dE)
					#self.bunch.compress()
		
		ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT, 0, comm)
		x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)
		dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0, comm)

		n_remainder = ninjected % numprocs
		n_inj_local = ninjected // numprocs

		#inject an equal number of particles on each CPU
	
		i_start = rank * n_inj_local
		i_stop = (rank+1) * n_inj_local 
		for i in range(i_start, i_stop):
			self.bunch.addParticle(x_local[i],xp_local[i],y_local[i],yp_local[i],z_local[i],dE_local[i])
				
		n_max_index = numprocs * n_inj_local

		for i in range(n_remainder):
			i_cpu = int(numprocs * random.random())
			i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
			if(rank == i_cpu):
				self.bunch.addParticle(x_local[i + n_max_index], xp_local[i + n_max_index],
									   y_local[i + n_max_index], yp_local[i + n_max_index],
									   z_local[i + n_max_index], dE_local[i + n_max_index])
				n_inj_local = n_inj_local + 1
					
		#nInjectedMacros += n_inj_local;
		
		self.bunch.compress()
		self.lostbunch.compress()
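
The distribution step above splits the injected particles evenly across CPUs and hands the remainder out one at a time to randomly chosen ranks. A standalone check of the bookkeeping, with hypothetical numbers:

# Sketch of the even-split arithmetic used by addParticles.
ninjected, numprocs = 10, 4
n_inj_local = ninjected // numprocs    # 2: particles every CPU takes
n_remainder = ninjected % numprocs     # 2: leftovers assigned randomly
n_max_index = numprocs * n_inj_local   # 8: index where the leftovers start
print(n_inj_local, n_remainder, n_max_index)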
Example #7
    def addParticles(self):
        """
		Performs injections.
		"""
        #---- User can skip injection if self.nparts is zero
        if (self.nparts == 0): return
        random.seed(100)

        (xmin, xmax, ymin, ymax) = self.injectregion

        #adjusts number of particles injected according to varying pattern width
        if self.lDistFunc.name == "JohoLongitudinalPaint":
            self.lDistFunc.getCoordinates()
            self.npartsfloat = self.lDistFunc.frac_change * self.npartsfloat
            self.nparts = int(round(self.npartsfloat))

        rank = 0
        numprocs = 1

        mpi_init = orbit_mpi.MPI_Initialized()
        comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD

        if (mpi_init):
            rank = orbit_mpi.MPI_Comm_rank(comm)
            numprocs = orbit_mpi.MPI_Comm_size(comm)

        nPartsGlobal = self.bunch.getSizeGlobal()

        if (self.nmaxmacroparticles > 0):
            if (nPartsGlobal >= self.nmaxmacroparticles):
                return

        x_rank0 = []
        xp_rank0 = []
        y_rank0 = []
        yp_rank0 = []
        z_rank0 = []
        dE_rank0 = []
        ninjected_rank0 = 0
        x_local = []
        xp_local = []
        y_local = []
        yp_local = []
        z_local = []
        dE_local = []
        ninjectedlocal = 0

        if (rank == 0):
            for i in range(int(self.nparts)):
                (x, px) = self.xDistFunc.getCoordinates()
                (y, py) = self.yDistFunc.getCoordinates()
                (z, dE) = self.lDistFunc.getCoordinates()

                if ((x > xmin) and (x < xmax) and (y > ymin) and (y < ymax)):
                    ninjectedlocal = ninjectedlocal + 1
                    x_rank0.append(x)
                    xp_rank0.append(px)
                    y_rank0.append(y)
                    yp_rank0.append(py)
                    z_rank0.append(z)
                    dE_rank0.append(dE)
                else:
                    self.addLostParticle(self.bunch, self.lostbunch, x, px, y,
                                         py, z, dE)

        nPartsLostGlobal = self.lostbunch.getSizeGlobal()
        nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal

        ninjected = orbit_mpi.MPI_Bcast(ninjectedlocal, mpi_datatype.MPI_INT,
                                        0, comm)
        x_local = orbit_mpi.MPI_Bcast(x_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        xp_local = orbit_mpi.MPI_Bcast(xp_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)
        y_local = orbit_mpi.MPI_Bcast(y_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        yp_local = orbit_mpi.MPI_Bcast(yp_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)
        z_local = orbit_mpi.MPI_Bcast(z_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                      comm)
        dE_local = orbit_mpi.MPI_Bcast(dE_rank0, mpi_datatype.MPI_DOUBLE, 0,
                                       comm)

        n_remainder = ninjected % numprocs
        n_inj_local = ninjected // numprocs

        #---- inject the equal number of particles on each CPU
        i_start = rank * n_inj_local
        i_stop = (rank + 1) * n_inj_local
        for i in range(i_start, i_stop):
            particleId = nPartsTotalGlobal + i
            self.addInjectedParticle(self.bunch, x_local[i], xp_local[i],
                                     y_local[i], yp_local[i], z_local[i],
                                     dE_local[i], particleId)

        #---- inject the remainder of the particles
        n_max_index = numprocs * n_inj_local

        nPartsGlobal = self.bunch.getSizeGlobal()
        nPartsLostGlobal = self.lostbunch.getSizeGlobal()
        nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal

        for i in range(n_remainder):
            i_cpu = random.randint(0, numprocs - 1)
            i_cpu = orbit_mpi.MPI_Bcast(i_cpu, mpi_datatype.MPI_INT, 0, comm)
            if (rank == i_cpu):
                particleId = nPartsTotalGlobal + i
                self.addInjectedParticle(self.bunch, x_local[i + n_max_index],
                                         xp_local[i + n_max_index],
                                         y_local[i + n_max_index],
                                         yp_local[i + n_max_index],
                                         z_local[i + n_max_index],
                                         dE_local[i + n_max_index], particleId)
                #---- here n_inj_local is just for information for debugging
                n_inj_local = n_inj_local + 1

        self.bunch.compress()
        self.lostbunch.compress()
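
The particleId values above continue from the combined global count of live and lost macroparticles, which keeps IDs unique across both bunches. The bookkeeping in isolation, with hypothetical counts:

# Sketch of the particleId numbering in this version of addParticles.
nPartsGlobal, nPartsLostGlobal = 100, 7   # hypothetical current global counts
nPartsTotalGlobal = nPartsGlobal + nPartsLostGlobal
new_ids = [nPartsTotalGlobal + i for i in range(3)]
print(new_ids)  # [107, 108, 109]: the next three injected particles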
Example #8
# MPI_Wtick()
# finalize([message])
#------------------------------------------------------
# and MPI constants:
# MPI_IDENT
# MPI_CONGRUENT
# MPI_SIMILAR
# MPI_UNEQUAL
# MPI_UNDEFINED
# MPI_UNDEFINED_RANK
# MPI_SUCCESS
# MPI_ANY_SOURCE
# MPI_ANY_TAG
#------------------------------------------------------

mpi_init = orbit_mpi.MPI_Initialized()

cpu = orbit_mpi.MPI_Get_processor_name()
rank = orbit_mpi.MPI_Comm_rank(orbit_mpi.mpi_comm.MPI_COMM_WORLD)
size = orbit_mpi.MPI_Comm_size(orbit_mpi.mpi_comm.MPI_COMM_WORLD)
t = orbit_mpi.MPI_Wtime()
tick = orbit_mpi.MPI_Wtick()

#---------MPI Constant -----------------------
if (rank == 0):
    print "init=", mpi_init, " rank=", rank, " size=", size, " name=", cpu, " time=", t, " tick=", tick
    print "MPI_IDENT=", orbit_mpi.MPI_IDENT
    print "MPI_CONGRUENT=", orbit_mpi.MPI_CONGRUENT
    print "MPI_SIMILAR=", orbit_mpi.MPI_SIMILAR
    print "MPI_UNEQUAL=", orbit_mpi.MPI_UNEQUAL
    print "MPI_UNDEFINED=", orbit_mpi.MPI_UNDEFINED