Example #1

wallId=O.bodies.append(box(center=(0,0,0),extents=(2,0,1),fixed=True))
for x in range(-1,2):
	O.bodies.append(sphere((x,0.5,0),0.5))

newton.gravity=(0,-10,0) #else nothing would move
O.dt=0.1*PWaveTimeStep() 
O.dynDt=False #very important, we don't want subdomains to use many different timesteps...

from yade import mpy as mp
mp.initialize(3)

if mp.rank==0:
	print( mp.sendCommand(executors="all",command="len(O.bodies)",wait=True) )

	#note that 'rank' is used instead of mp.rank: the command is executed in the scope of the mpy module on each worker
	mp.sendCommand(executors=[1,2],command= "ids=O.bodies.append([sphere((xx,1.5+rank,0),0.5) for xx in range(-1,2)])",wait=True)

	print( mp.sendCommand(executors="all",command="len(O.bodies)",wait=True) )

	mp.sendCommand(executors=[1,2],command= "list(map(lambda b: setattr(b,'subdomain',rank),O.bodies))", wait=True)

	print("Assigned bodies:", mp.sendCommand([1,2], "len([b for b in O.bodies if b.subdomain==rank])", True) )
	
mp.DISTRIBUTED_INSERT=True
mp.MERGE_W_INTERACTIONS=True
mp.ERASE_REMOTE_MASTER=False

# we can exploit sendCommand to send data between yade instances directly with mpi4py functions (see mpi4py documentation)
#Yade [8]: mp.sendCommand(executors=1,command="message=comm.recv(source=0); print('received',message)",wait=False)
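# a minimal sketch of the matching master-side call (mirroring the send/recv pair of Example #4 below);
# the worker above blocks in comm.recv until rank 0 sends it a picklable object:
#Yade [9]: mp.comm.send("hello",dest=1)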
Example #2
newton.gravity=(0,-10,0) #else nothing would move
tsIdx=O.engines.index(timeStepper) #remove the automatic timestepper. Very important: we don't want subdomains to use many different timesteps...
O.engines=O.engines[0:tsIdx]+O.engines[tsIdx+1:]
O.dt=0.001 #this very small timestep will make it possible to run 2000 iter without merging
#O.dt=0.1*PWaveTimeStep() #very important, we don't want subdomains to use many different timesteps...

# customize mpy
mp.ACCUMULATE_FORCES=True #trigger force summation on master's body (here WALL_ID)
mp.VERBOSE_OUTPUT=False
mp.ERASE_REMOTE=False #erase bodies not interacting with a given subdomain?
mp.OPTIMIZE_COM=True #L1-optimization: pass a list of doubles instead of a list of states
mp.USE_CPP_MPI=True and mp.OPTIMIZE_COM #L2-optimization: work around Python by passing a vector<double> at the C++ level
mp.MERGE_W_INTERACTIONS=False
mp.COPY_MIRROR_BODIES_WHEN_COLLIDE = False
mp.YADE_TIMING=False
mp.NO_OUTPUT=True

mp.mpirun(NSTEPS,numThreads)
mp.mprint( "num. bodies:",len([b for b in O.bodies]))
mp.mprint( "Partial force on floor="+str(O.forces.f(WALL_ID)[1]))

Ek=0
if mp.rank==0:
	Ek=sum(mp.sendCommand([1,2,3],"kineticEnergy()",True))
	mp.mprint("got Ek=",Ek)
	refEk=1203790.66007
	if (abs(Ek-refEk)/refEk)>1e-10:
		raise YadeCheckError("kinetic energy changed by "+str((Ek-refEk)/refEk))

Example #3
                    label="interactionLoop"),
    GlobalStiffnessTimeStepper(timestepSafetyCoefficient=0.3,
                               timeStepUpdateInterval=100,
                               parallelMode=True,
                               label='timeStepper'),
    NewtonIntegrator(damping=0.1, gravity=(0, -0.1, 0), label='newton'),
    VTKRecorder(
        fileName='spheres/3d-vtk-',
        recorders=['spheres', 'intr', 'boxes'],
        parallelMode=True,
        iterPeriod=500
    ),  #use .pvtu to open spheres, .pvtp for ints, and .vtu for boxes.
]

collider.verletDist = 1.5

#########  RUN  ##########
# customize mpy
mp.ERASE_REMOTE_MASTER = True
mp.USE_CPP_REALLOC = True
mp.USE_CPP_INTERS = True
mp.DOMAIN_DECOMPOSITION = True
mp.mpirun(NSTEPS)
mp.mergeScene()
if mp.rank == 0: O.save('mergedScene.yade')

#demonstrate getting stuff from workers
if mp.rank == 0:
    print("kinetic energy from workers: " +
          str(mp.sendCommand([1, 2], "kineticEnergy()", True)))
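# a hedged follow-up sketch (not in the original script): the same sendCommand mechanism can also
# report how DOMAIN_DECOMPOSITION distributed the bodies among the workers, as queried in Example #1
if mp.rank == 0:
    print("bodies per worker: " +
          str(mp.sendCommand([1, 2], "len([b for b in O.bodies if b.subdomain==rank])", True)))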
Example #4
# 2018 © Bruno Chareyre <*****@*****.**> 

# Really a helloWorld (or almost, I couldn't refrain from adding a bit more).
# Check other examples for concrete usage of mpy in DEM simulations 

from yade import mpy as mp
mp.initialize(3)
#mp.VERBOSE_OUTPUT = True # to see more of what happens behind the scene

mp.mprint("I'm here")

if mp.rank==0:
        # say hello:
        mp.sendCommand(executors=[1,2],command="mprint('Yes I am really here')",wait=False)
        
        # get return values if wait=True (blocking comm., think twice)
        print( mp.sendCommand(executors="all",command="len(O.bodies)",wait=True) )
        
        ## exploit sendCommand to send data between yade instances directly with mpi4py (see mpi4py documentation)
        ## in the message we actually tell the worker to wait for another message (nested comm.), but the second one
        ## uses the underlying mpi4py, which handles picklable objects
        mp.sendCommand(executors=1,command="message=comm.recv(source=0); mprint('received: ',message)",wait=False)
        mp.comm.send("hello",dest=1)
        
        ## picklable objects
        ## pay attention to the pointer addresses, they are different! (as expected)
        ## this is moving data around between independent parts of memory
        mp.sendCommand(executors=1,command="O.bodies.append(Body()); O.bodies[0].shape=Sphere(radius=0.456); comm.send(O.bodies[0],dest=0); mprint('sent a ',O.bodies[0].shape)",wait=False)
        bodyFrom1 = mp.comm.recv(source=1)
        mp.mprint("received a ",bodyFrom1.shape,"with radius=",bodyFrom1.shape.radius)