import os
import subprocess
import sys

import mdi


def test_nodes():
    port_num = 9001
    mdi_driver_options = "-role DRIVER -name driver -method TCP -port " + str(port_num)

    # Get the number of nodes
    #driver_proc = subprocess.Popen([sys.executable, "min_driver.py", "-command", "<NNODES",
    #                                "-nreceive", "1", "-rtype", "MDI_INT",
    #                                "-mdi", mdi_driver_options],
    #                               stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd="./drivers")

    # Run LAMMPS as an engine
    mdi_engine_options = ("-role ENGINE -name TESTCODE -method TCP "
                          "-hostname localhost -port " + str(port_num))
    working_dir = "../../user/mdi_tests/test1"
    user_path = os.environ['USER_PATH']
    os.system("rm -rf ./_work")
    os.system("cp -r " + str(working_dir) + " _work")
    engine_path = str(user_path) + "/lammps/src/lmp_mdi"
    engine_proc = subprocess.Popen(
        [engine_path, "-mdi", mdi_engine_options, "-in", "lammps.in"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd="./_work")

    # Convert the driver's output into a string
    #driver_tup = driver_proc.communicate()
    #driver_out = format_return(driver_tup[0])
    #driver_err = format_return(driver_tup[1])
    #print("CHECK_MDI_NODES.PY")
    #print("  Driver out: " + str(driver_out))
    #print("  Driver err: " + str(driver_err))

    # Connect to the engine directly, rather than through a separate driver process
    mdi.MDI_Init(mdi_driver_options, None)
    comm = mdi.MDI_Accept_Communicator()

    # Query the engine for its nodes and the commands supported at each node
    nnodes = mdi.MDI_Get_NNodes(comm)
    print("NNodes: " + str(nnodes))
    nodes = [mdi.MDI_Get_Node(inode, comm) for inode in range(nnodes)]
    print("Nodes: " + str(nodes))
    for node in nodes:
        ncommands = mdi.MDI_Get_NCommands(node, comm)
        commands = [
            mdi.MDI_Get_Command(node, icommand, comm)
            for icommand in range(ncommands)
        ]
        print("Commands: " + str(commands))

    # Tell the engine to exit, then collect its output
    mdi.MDI_Send_Command("EXIT", comm)
    engine_tup = engine_proc.communicate()
    engine_out = format_return(engine_tup[0])
    engine_err = format_return(engine_tup[1])
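# The calls to format_return() above rely on a helper defined elsewhere in
# this test suite.  A minimal sketch of such a helper, assuming it only needs
# to decode the bytes from subprocess.communicate() and normalize line
# endings (hypothetical; not necessarily the suite's exact code):
def format_return(input_string):
    # subprocess.communicate() returns bytes; decode them to str
    output = input_string.decode("utf-8")
    # strip \r characters that can appear in output captured on Windows
    output = output.replace("\r", "")
    return output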
        iarg += 2
    elif args[iarg] == "-seed":
        if iarg + 2 > narg:
            error()
        seed = int(args[iarg + 1])
        if seed <= 0:
            error()
        iarg += 2
    else:
        error()

if not mdiarg:
    error()

mdi.MDI_Init(mdiarg)

# LAMMPS engine is a stand-alone code
# world = MPI communicator for just this driver
# invoke perform_tasks() directly

if not plugin:
    world = mdi.MDI_MPI_get_world_comm()
    mdicomm = mdi.MDI_Accept_Communicator()
    perform_tasks(world, mdicomm, None)

# LAMMPS engine is a plugin library
# launch plugin
# MDI will call back to perform_tasks()

if plugin:
    world = MPI.COMM_WORLD
    plugin_args += " -mdi \"-role ENGINE -name LMP -method LINK\""
    mdi.MDI_Launch_plugin(plugin, plugin_args, world, perform_tasks, None)
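# For reference: the callback handed to MDI_Launch_plugin() takes the same
# (world, mdicomm, extra) arguments that the stand-alone branch passes when
# calling perform_tasks() directly, and MDI expects it to return 0 on
# success.  A minimal sketch of such a callback, assuming a driver that only
# queries the engine's name (the real perform_tasks() in this script does
# the actual simulation work):
def perform_tasks_minimal(world, mdicomm, dummy):
    # ask the engine to report its name, then read the reply
    mdi.MDI_Send_command("<NAME", mdicomm)
    name = mdi.MDI_Recv(mdi.MDI_NAME_LENGTH, mdi.MDI_CHAR, mdicomm)
    # MDI_Recv returns valid data on rank 0; share it with all ranks
    name = world.bcast(name, root=0)
    if world.Get_rank() == 0:
        print("Engine name: " + str(name))
    # tell the engine to shut down cleanly
    mdi.MDI_Send_command("EXIT", mdicomm)
    return 0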
    else:
        error()

if not mdiarg:
    error()

# LAMMPS engines are stand-alone codes
# world = MPI communicator for just this driver
# invoke perform_tasks() directly

if not plugin:
    mdi.MDI_Init(mdiarg)
    world = mdi.MDI_MPI_get_world_comm()

    # connect to 2 engines, determine which is MM vs QM

    mdicomm1 = mdi.MDI_Accept_Communicator()
    mdicomm2 = mdi.MDI_Accept_Communicator()

    mdi.MDI_Send_command("<NAME", mdicomm1)
    name1 = mdi.MDI_Recv(mdi.MDI_NAME_LENGTH, mdi.MDI_CHAR, mdicomm1)
    name1 = world.bcast(name1, root=0)
    mdi.MDI_Send_command("<NAME", mdicomm2)
    name2 = mdi.MDI_Recv(mdi.MDI_NAME_LENGTH, mdi.MDI_CHAR, mdicomm2)
    name2 = world.bcast(name2, root=0)

    if name1 == "MM" and name2 == "QM":
        mm_comm = mdicomm1
        qm_comm = mdicomm2
    elif name1 == "QM" and name2 == "MM":
        mm_comm = mdicomm2
        qm_comm = mdicomm1
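# How the two connections above come about, for illustration: with the TCP
# method each code is launched separately and the engines dial in to the
# driver, which is why MDI_Accept_Communicator() is called twice and the
# "<NAME" query is needed to tell MM from QM (the connection order is not
# guaranteed).  Hypothetical command lines; the engine binary names and the
# port number are placeholders:
#
#   python driver.py -mdi "-role DRIVER -name driver -method TCP -port 8021"
#   mm_engine -mdi "-role ENGINE -name MM -method TCP -hostname localhost -port 8021"
#   qm_engine -mdi "-role ENGINE -name QM -method TCP -hostname localhost -port 8021"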
    mpi_task_comm = mpi_world.Split(color, key)
    task_rank = mpi_task_comm.Get_rank()
else:
    mpi_task_comm = None
    task_rank = 0

# This test uses the LIBRARY communication method
method = mdi.MDI_LIB

niterations = 10
for iiteration in range(niterations):

    # Create and connect to a library instance that spans the MPI task communicator
    MDIEngine("-role ENGINE -name MM -method LIBRARY -driver_name driver",
              mpi_task_comm)
    comm = mdi.MDI_Accept_Communicator()

    # Create and connect to a library instance that spans MPI_COMM_WORLD
    MDIEngine("-role ENGINE -name unsplit -method LIBRARY -driver_name driver",
              mpi_world)
    comm_unsplit = mdi.MDI_Accept_Communicator()

    # Communicate with the library instance that spans the MPI task communicator
    mdi.MDI_Send_Command("<NATOMS", comm)
    natoms = mdi.MDI_Recv(1, mdi.MDI_INT, comm)
    if world_rank == 0:
        print("NATOMS: " + str(natoms))
    mdi.MDI_Send_Command("EXIT", comm)

    # Communicate with the library instance that spans MPI_COMM_WORLD
    mdi.MDI_Send_Command("<NATOMS", comm_unsplit)
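    # The fragment above ends mid-exchange.  A sketch of the matching
    # receive and shutdown, assuming it mirrors the exchange with the
    # task-communicator instance (hypothetical continuation, not the
    # test's verbatim code):
    natoms_unsplit = mdi.MDI_Recv(1, mdi.MDI_INT, comm_unsplit)
    if world_rank == 0:
        print("NATOMS (unsplit): " + str(natoms_unsplit))
    mdi.MDI_Send_Command("EXIT", comm_unsplit)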