Code Example #1
File: testMPI_3D.py Project: Drusyc/yade-mpi
if rank is None:  #######  Single-core  ######
    O.timingEnabled = True
    O.run(NSTEPS, True)
    #print "num bodies:",len(O.bodies)
    from yade import timing
    timing.stats()
    collectTiming()
    print "num. bodies:", len([b for b in O.bodies]), len(O.bodies)
    print "Total force on floor=", O.forces.f(WALL_ID)[1]
else:  #######  MPI  ######
    #import yade's mpi module
    from yade import mpy as mp
    # customize
    mp.ACCUMULATE_FORCES = True  #trigger force summation on master's body (here WALL_ID)
    mp.VERBOSE_OUTPUT = False
    mp.ERASE_REMOTE = True  #erase bodies not interacting with a given subdomain?
    mp.OPTIMIZE_COM = True  #L1-optimization: pass a list of doubles instead of a list of states
    mp.USE_CPP_MPI = True and mp.OPTIMIZE_COM  #L2-optimization: work around Python by passing a vector<double> at the C++ level

    mp.mpirun(NSTEPS, True)  # second argument enables the final merge (cf. mp.mpirun(NSTEPS, merge) in Example #2)
    print "num. bodies:", len([b for b in O.bodies]), len(O.bodies)
    if rank == 0:
        mp.mprint("Total force on floor=" + str(O.forces.f(WALL_ID)[1]))
        collectTiming()
    else:
        mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))
    mp.mergeScene()
    if rank == 0: O.save('mergedScene.yade')
    mp.MPI.Finalize()
#exit()
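
Note on Example #1: the test `if rank is None` assumes `rank` was set earlier in testMPI_3D.py. Below is a minimal sketch of one common way to obtain it, assuming an OpenMPI launcher that exports OMPI_COMM_WORLD_RANK/OMPI_COMM_WORLD_SIZE (the same variables collectTiming() reads in Example #7); the exact setup in the original file may differ.

import os

# Set by mpiexec/OpenMPI for every process; absent (None) in a single-core run,
# which is exactly what the `if rank is None` branch above relies on.
rank = os.getenv('OMPI_COMM_WORLD_RANK')
if rank is not None:
    rank = int(rank)
numThreads = int(os.getenv('OMPI_COMM_WORLD_SIZE') or 1)
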
Code Example #2
    from yade import timing
    timing.stats()
    collectTiming()
    print "num. bodies:", len([b for b in O.bodies]), len(O.bodies)
    print "Total force on floor=", O.forces.f(WALL_ID)[1]
else:  #######  MPI  ######
    #import yade's mpi module
    from yade import mpy as mp
    # customize
    mp.ACCUMULATE_FORCES = True  #trigger force summation on master's body (here WALL_ID)
    mp.VERBOSE_OUTPUT = False
    mp.ERASE_REMOTE = True  #erase bodies not interacting with a given subdomain?
    mp.OPTIMIZE_COM = True  #L1-optimization: pass a list of doubles instead of a list of states
    mp.USE_CPP_MPI = True and mp.OPTIMIZE_COM  #L2-optimization: work around Python by passing a vector<double> at the C++ level
    if rank == 0: start = time.time()
    mp.mpirun(NSTEPS, merge)
    if rank == 0: end = time.time()
    print "num. bodies:", len([b for b in O.bodies]), len(O.bodies)
    if rank == 0:
        mp.mprint("Total force on floor=" + str(O.forces.f(WALL_ID)[1]))
        collectTimingMPI()
    else:
        mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))
    #if merge: mp.mergeScene()
    #if rank==0: O.save('mergedScene.yade')
    mp.MPI.Finalize()

if rank == 0:
    wallTime = end - start
    created = os.path.isfile("wallTimes_2D_weakScalability-PREHACK-" +
                             timeStr + ".dat")
Code Example #3
                    label="interactionLoop"),
    GlobalStiffnessTimeStepper(timestepSafetyCoefficient=0.3,
                               timeStepUpdateInterval=100,
                               parallelMode=True,
                               label='timeStepper'),
    NewtonIntegrator(damping=0.1, gravity=(0, -0.1, 0), label='newton'),
    VTKRecorder(
        fileName='spheres/3d-vtk-',
        recorders=['spheres', 'intr', 'boxes'],
        parallelMode=True,
        iterPeriod=500
    ),  #use .pvtu to open spheres, .pvtp for ints, and .vtu for boxes.
]

collider.verletDist = 1.5

#########  RUN  ##########
# customize mpy
mp.ERASE_REMOTE_MASTER = True
mp.USE_CPP_REALLOC = True
mp.USE_CPP_INTERS = True
mp.DOMAIN_DECOMPOSITION = True
mp.mpirun(NSTEPS)
mp.mergeScene()
if mp.rank == 0: O.save('mergedScene.yade')

#demonstrate getting stuff from workers
if mp.rank == 0:
    print("kinetic energy from workers: " +
          str(mp.sendCommand([1, 2], "kineticEnergy()", True)))
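The sendCommand call above returns one value per worker rank (here the kinetic energies of subdomains 1 and 2). Example #10 sums such per-rank results into a global quantity; a minimal sketch of the same pattern, assuming two workers as in this snippet:

# Collect and sum the per-subdomain kinetic energies returned by ranks 1 and 2;
# the third argument (True) asks sendCommand to return the workers' results.
if mp.rank == 0:
    perWorker = mp.sendCommand([1, 2], "kineticEnergy()", True)
    print("total kinetic energy:", sum(perWorker))
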
Code Example #4
File: Case1_SiloFlow.py Project: HuanranWU/Yade
    mp.DISTRIBUTED_INSERT = True
    mp.REALLOCATE_FREQUENCY = 4
    mp.ACCUMULATE_FORCES = False
    mp.MAX_RANK_OUTPUT = 4

else:
    O.bodies.append(facets)
    O.bodies.append(sp_new)

collider.verletDist = 0.5
O.dt = 1e-10
O.dynDt = False

if numMPIThreads > 1:
    mp.mpirun(
        1, numMPIThreads, False
    )  #this is to eliminate initialization overhead in Cundall number and timings
    mp.YADE_TIMING = True
    t1 = time.time()
    mp.mpirun(NSTEPS, withMerge=False)
    t2 = time.time()
    mp.mprint("num. bodies:", len([b for b in O.bodies]), " ", len(O.bodies))
    if mp.rank == 0:
        mp.mprint("CPU wall time for ", NSTEPS, " iterations:", t2 - t1,
                  "; Cundall number = TODO")
    #mp.mergeScene()

else:
    O.run(1, True)
    t1 = time.time()
    O.run(NSTEPS, True)
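
The "Cundall number = TODO" printed in the MPI branch above can be computed with the formula used in Example #5 (particle updates per second of wall time). A sketch for the MPI branch, reusing the t1/t2 timestamps already taken there; names match the snippet:

# Cundall number = bodies * iterations / wall-clock time, as in Example #5.
if mp.rank == 0:
    mp.mprint("Cundall number =", len(O.bodies) * NSTEPS / (t2 - t1))
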
Code Example #5
    O.run(NSTEPS + 1, True)
    #print "num bodies:",len(O.bodies)
    from yade import timing
    timing.stats()
    collectTiming()
    print("num. bodies:", len([b for b in O.bodies]), len(O.bodies))
    print("Total force on floor=", O.forces.f(WALL_ID)[1])
else:  #######  MPI  ######
    #import yade's mpi module
    from yade import mpy as mp
    # customize
    mp.ACCUMULATE_FORCES = True  #trigger force summation on master's body (here WALL_ID)
    mp.VERBOSE_OUTPUT = False
    mp.MAX_RANK_OUTPUT = 4
    mp.mpirun(
        1
    )  #this is to eliminate initialization overhead in Cundall number and timings
    from yade import timing
    timing.reset()
    t1 = time.time()
    mp.mpirun(NSTEPS)
    t2 = time.time()
    mp.mprint("num. bodies:", len([b for b in O.bodies]), " ", len(O.bodies))
    if rank == 0:
        mp.mprint("Total force on floor=" + str(O.forces.f(WALL_ID)[1]))
        mp.mprint("CPU wall time for ", NSTEPS, " iterations:", t2 - t1,
                  "; Cundall number = ",
                  len(O.bodies) * NSTEPS / (t2 - t1))
        collectTiming()
    else:
        mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))
Code Example #6
File: checkMPI.py Project: Togekey/yade_trunk
tsIdx = O.engines.index(
    timeStepper
)  #remove the automatic timestepper. Very important: we don't want subdomains to use many different timesteps...
O.engines = O.engines[0:tsIdx] + O.engines[tsIdx + 1:]
O.dt = 0.001  #this very small timestep will make it possible to run 2000 iter without merging
#O.dt=0.1*PWaveTimeStep() #very important, we don't want subdomains to use many different timesteps...

# customize mpy
mp.ACCUMULATE_FORCES = True  #trigger force summation on master's body (here WALL_ID)
mp.VERBOSE_OUTPUT = False
mp.ERASE_REMOTE = False  #erase bodies not interacting with a given subdomain?
mp.OPTIMIZE_COM = True  #L1-optimization: pass a list of doubles instead of a list of states
mp.USE_CPP_MPI = True and mp.OPTIMIZE_COM  #L2-optimization: work around Python by passing a vector<double> at the C++ level
mp.MERGE_W_INTERACTIONS = False
mp.COPY_MIRROR_BODIES_WHEN_COLLIDE = True
mp.VERBOSE_OUTPUT = False
mp.YADE_TIMING = False
mp.NO_OUTPUT = True

mp.mpirun(NSTEPS, numThreads, True)
mp.mprint("num. bodies:", len([b for b in O.bodies]))
mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))

Ek = 0
if mp.rank == 0:
    Ek = kineticEnergy()
    mp.mprint("got Ek=", Ek)
    refEk = 1120803.9955506378
    if (abs(Ek - refEk) / refEk) > 1e-10:
        raise YadeCheckError("kinetic energy changed by " +
                             str((Ek - refEk) / refEk))
Code Example #7
#O.dt=0.1*PWaveTimeStep() #very important, we don't want subdomains to use many different timesteps...


#########  RUN  ##########
def collectTiming():
    created = os.path.isfile("collect.dat")
    f = open('collect.dat', 'a')
    if not created: f.write("numThreads mpi omp Nspheres N M runtime \n")
    from yade import timing
    f.write(
        str(numThreads) + " " + str(os.getenv('OMPI_COMM_WORLD_SIZE')) + " " +
        os.getenv('OMP_NUM_THREADS') + " " + str(N * M * (numThreads - 1)) +
        " " + str(N) + " " + str(M) + " " + str(timing.runtime()) + "\n")
    f.close()


# customize mpy
mp.MERGE_W_INTERACTIONS = False
mp.ERASE_REMOTE_MASTER = False

mp.mpirun(NSTEPS + 1, 4,
          True)  #+1 in order to be consistent with other example scripts
mp.mprint("num. bodies:", len([b for b in O.bodies]), len(O.bodies))
mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))

#demonstrate getting stuff from workers
if mp.rank == 0:
    print("kinetic energy from workers: " +
          str(mp.sendCommand([1, 2], "kineticEnergy()", True)))
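
The collectTiming() helper at the top of this example appends one whitespace-separated row per run to collect.dat, writing the header only when the file is first created. A small sketch (not part of the original script) for reading the file back during post-processing:

# Parse collect.dat into a list of dicts keyed by the header columns
# ("numThreads mpi omp Nspheres N M runtime").
rows = []
with open('collect.dat') as f:
    header = f.readline().split()
    for line in f:
        if line.strip():
            rows.append(dict(zip(header, line.split())))
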
Code Example #8
File: reallocateBodies.py Project: HuanranWU/Yade
def animate(N,nsteps):
	for i in range(N):
		mp.mpirun(nsteps,4,True)
		for b in O.bodies: b.shape.color=colorScale[b.subdomain]
		time.sleep(0.1)
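
A usage sketch for the animate() helper above; `colorScale` is assumed to be defined elsewhere in reallocateBodies.py as one color per subdomain, so the values below are placeholders only:

import time
from yade import mpy as mp

# Placeholder per-subdomain colors (index 0 = master); replace with the
# script's own colorScale if it defines one.
colorScale = [(0.5, 0.5, 0.5), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
animate(20, 100)  # 20 refreshes, each running 100 iterations via mp.mpirun(100, 4, True)
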
Code Example #9
if rank is None: #######  Single-core  ######
	O.timingEnabled=True
	O.run(NSTEPS,True)
	#print "num bodies:",len(O.bodies)
	from yade import timing
	timing.stats()
	collectTiming()
	print "num. bodies:",len([b for b in O.bodies]),len(O.bodies)
	print "Total force on floor=",O.forces.f(WALL_ID)[1]
else: #######  MPI  ######
	#import yade's mpi module
	from yade import mpy as mp
	# customize
	mp.ACCUMULATE_FORCES=True #trigger force summation on master's body (here WALL_ID)
	mp.VERBOSE_OUTPUT=False
	mp.ERASE_REMOTE=True #erase bodies not interacting with a given subdomain?
	mp.OPTIMIZE_COM=True #L1-optimization: pass a list of doubles instead of a list of states
	mp.USE_CPP_MPI=True and mp.OPTIMIZE_COM #L2-optimization: work around Python by passing a vector<double> at the C++ level

	mp.mpirun(NSTEPS,True)
	print "num. bodies:",len([b for b in O.bodies]),len(O.bodies)
	if rank==0:
		mp.mprint( "Total force on floor="+str(O.forces.f(WALL_ID)[1]))
		collectTiming()
	else: mp.mprint( "Partial force on floor="+str(O.forces.f(WALL_ID)[1]))
	mp.mergeScene()
	#if rank==0: O.save('mergedScene.yade')
	mp.MPI.Finalize()
exit()
Code Example #10
File: checkMPI.py Project: Zhijie-YU/yade-copy
newton.gravity=(0,-10,0) #else nothing would move
tsIdx=O.engines.index(timeStepper) #remove the automatic timestepper. Very important: we don't want subdomains to use many different timesteps...
O.engines=O.engines[0:tsIdx]+O.engines[tsIdx+1:]
O.dt=0.001 #this very small timestep will make it possible to run 2000 iter without merging
#O.dt=0.1*PWaveTimeStep() #very important, we don't want subdomains to use many different timesteps...

# customize mpy
mp.ACCUMULATE_FORCES=True #trigger force summation on master's body (here WALL_ID)
mp.VERBOSE_OUTPUT=False
mp.ERASE_REMOTE=False #erase bodies not interacting with a given subdomain?
mp.OPTIMIZE_COM=True #L1-optimization: pass a list of doubles instead of a list of states
mp.USE_CPP_MPI=True and mp.OPTIMIZE_COM #L2-optimization: work around Python by passing a vector<double> at the C++ level
mp.MERGE_W_INTERACTIONS=False
mp.COPY_MIRROR_BODIES_WHEN_COLLIDE = False
mp.VERBOSE_OUTPUT=False
mp.YADE_TIMING=False
mp.NO_OUTPUT=True

mp.mpirun(NSTEPS,numThreads)
mp.mprint( "num. bodies:",len([b for b in O.bodies]))
mp.mprint( "Partial force on floor="+str(O.forces.f(WALL_ID)[1]))

Ek=0
if mp.rank==0:
	Ek=sum(mp.sendCommand([1,2,3],"kineticEnergy()",True))
	mp.mprint("got Ek=",Ek)
	refEk=1203790.66007
	if (abs(Ek-refEk)/refEk)>1e-10:
		raise YadeCheckError("kinetic energy changed by "+str((Ek-refEk)/refEk))

Code Example #11
def collectTiming():
    created = os.path.isfile("collect.dat")
    f = open('collect.dat', 'a')
    if not created: f.write("numThreads mpi omp Nspheres N M runtime \n")
    from yade import timing
    f.write(
        str(numThreads) + " " + str(os.getenv('OMPI_COMM_WORLD_SIZE')) + " " +
        os.getenv('OMP_NUM_THREADS') + " " + str(N * M * (numThreads - 1)) +
        " " + str(N) + " " + str(M) + " " + str(timing.runtime()) + "\n")
    f.close()


# customize mpy
mp.ACCUMULATE_FORCES = True  #trigger force summation on master's body (here WALL_ID)
mp.VERBOSE_OUTPUT = False
mp.ERASE_REMOTE = False  #erase bodies not interacting with a given subdomain?
mp.OPTIMIZE_COM = True  #L1-optimization: pass a list of doubles instead of a list of states
mp.USE_CPP_MPI = True and mp.OPTIMIZE_COM  #L2-optimization: work around Python by passing a vector<double> at the C++ level
mp.MERGE_W_INTERACTIONS = False
mp.MERGE_SPLIT = mergeSplit
mp.COPY_MIRROR_BODIES_WHEN_COLLIDE = bodyCopy and not mergeSplit

mp.mpirun(NSTEPS, 4)
mp.mprint("num. bodies:", len([b for b in O.bodies]), len(O.bodies))
mp.mprint("Partial force on floor=" + str(O.forces.f(WALL_ID)[1]))

#demonstrate getting stuff from workers
if mp.rank == 0:
    print("kinetic energy from workers: " +
          str(mp.sendCommand([1, 2], "kineticEnergy()", True)))