import sys
from inspect import getmembers, isclass, ismethod
from unittest import TestCase

from openmdao.core.mpiwrap import MPI, under_mpirun

# NOTE: run_test and run_in_sub are assumed to be defined elsewhere in this module.


def mpirun_tests(args=None):
    """This is used in the "if __name__ == '__main__'" block to run all
    tests in that module.  The tests are run using mpirun.
    """
    if args is None:
        args = sys.argv[1:]

    mod = __import__('__main__')

    # by default stdout is not captured (nocap=True); the '-ns' option turns capture on
    if '-ns' in args:
        nocap = False
    else:
        nocap = True

    tests = [n for n in args if not n.startswith('-')]
    options = [n for n in args if n.startswith('-')]

    if tests:
        for test in tests:
            tcase, _, method = test.partition('.')
            if method:
                parent = getattr(mod, tcase)(methodName=method)
                if hasattr(parent, 'N_PROCS') and not under_mpirun():
                    # this test case wants multiple processes, so re-launch it under mpirun
                    retcode = run_in_sub(getattr(mod, tcase), test, options)
                    continue
            else:
                raise NotImplementedError("module test functions not supported yet")
                #parent = mod
                #method = tcase

            tspec = "%s:%s" % (mod.__file__, test)

            if MPI.COMM_WORLD.rank == 0:
                sys.stdout.write("%s ... " % tspec)

            result = run_test(tspec, parent, method, nocap=nocap)

            if under_mpirun():
                # gather results to rank 0 and report the first failure, or OK if all ranks passed
                results = MPI.COMM_WORLD.gather(result, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    for i, r in enumerate(results):
                        if r.status != 'OK':
                            sys.stdout.write("%s\nERROR in rank %d:\n" % (r.status, i))
                            sys.stdout.write("%s\n" % r.err_msg)
                            break
                    else:
                        sys.stdout.write("%s\n" % r.status)
            else:
                sys.stdout.write("%s\n" % result.status)
    else:
        # find all test methods in the file and mpirun ourselves for each one
        for k, v in getmembers(mod, isclass):
            if issubclass(v, TestCase):
                for n, method in getmembers(v, ismethod):
                    if n.startswith('test_'):
                        testspec = k + '.' + n
                        if not hasattr(v, 'N_PROCS'):
                            print(run_test(testspec, v(methodName=n), n, nocap=nocap))
                        else:
                            retcode = run_in_sub(v, testspec, options)
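# Hedged usage sketch (not from this module): the docstring above says
# mpirun_tests() is meant to be called from a test module's
# "if __name__ == '__main__'" block.  The test class name and its N_PROCS
# value here are hypothetical, for illustration only:
#
#     from unittest import TestCase
#
#     class TestDistributedThing(TestCase):
#         N_PROCS = 4  # ask to be re-launched under mpirun with 4 processes
#
#         def test_runs_in_parallel(self):
#             self.assertTrue(True)
#
#     if __name__ == '__main__':
#         mpirun_tests()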
def _check_mpi(self, out_stream=sys.stdout):
    if under_mpirun():
        # warn if we're running under MPI but no ParallelGroups were found
        if MPI.COMM_WORLD.rank == 0:
            for grp in self.root.subgroups(recurse=True, include_self=True):
                if isinstance(grp, ParallelGroup):
                    break
            else:
                print("\nRunning under MPI, but no ParallelGroups were found.",
                      file=out_stream)

            mincpu, maxcpu = self.root.get_req_procs()
            if maxcpu is not None and MPI.COMM_WORLD.size > maxcpu:
                print("\nmpirun was given %d MPI processes, but the problem can only use %d" %
                      (MPI.COMM_WORLD.size, maxcpu), file=out_stream)

    # or warn about any ParallelGroups found when not running under MPI
    else:
        for grp in self.root.subgroups(recurse=True, include_self=True):
            if isinstance(grp, ParallelGroup):
                print("\nFound ParallelGroup '%s', but not running under MPI." %
                      grp.pathname, file=out_stream)
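# For reference, a minimal sketch of what under_mpirun() (imported from
# openmdao.core.mpiwrap) might look like: detect an mpirun/mpiexec launch by
# checking for MPI-implementation environment variables.  The variable names
# below are assumptions covering common Open MPI and MPICH conventions, not
# necessarily what mpiwrap actually checks:
#
#     import os
#
#     def under_mpirun():
#         for name in os.environ:
#             if (name == 'OMPI_COMM_WORLD_RANK' or
#                     name.startswith('MPIR_') or
#                     name.startswith('MPICH_')):
#                 return True
#         return False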
"""functions useful for debugging openmdao""" import sys from pprint import pformat from openmdao.core.mpiwrap import MPI, under_mpirun if under_mpirun(): def debug(*msg): newmsg = ["%d: " % MPI.COMM_WORLD.rank] + list(msg) for m in newmsg: sys.stdout.write("%s " % m) sys.stdout.write('\n') sys.stdout.flush() else: def debug(*msg): for m in msg: sys.stdout.write("%s " % str(m)) sys.stdout.write('\n') def dump_meta(system, nest=0, out_stream=sys.stdout): """ Dumps the system tree with associated metadata for the params and unknowns `VecWrappers`. Args ---- system : `System` The node in the `System` tree where dumping begins.