def addStatus(self, test, err=None, error=False, failure=False):
    """Negotiate global status immediately after a test has been run.

    Called on all processors with the local test status (i.e. error,
    failure or success).  'err' is a tuple of values as returned by
    sys.exc_info().

    Parameters
    ----------
    test: the test case that was just run.
    err: exc_info tuple for the local error/failure, or None on success.
    error: True if this rank recorded an error for the test.
    failure: True if this rank recorded a failure for the test.

    Raises RuntimeError if a single rank reports both an error and a
    failure for the same test.
    """
    if error and failure:
        raise RuntimeError("Parallel unittest can't handle simultaneous"
                           ' errors and failures within a single test.')
    # Errors and failures are negotiated identically: all-gather the
    # per-rank flags, then let each failing rank broadcast its formatted
    # traceback so every rank ends up with the same report list.  The
    # original code duplicated this whole procedure for both categories.
    for flag, gathered, reports in (
            (error, self.last_errors, self.errors),
            (failure, self.last_failed, self.failures)):
        self.comm.all_gather(np.array([flag]), gathered)
        if not gathered.any():
            continue
        all_texts = []
        for rank in np.argwhere(gathered).ravel():
            if rank == self.comm.rank:
                assert gathered[self.comm.rank]
                text = self._exc_info_to_string(err, test)
            else:
                text = None
            # Every rank participates in the broadcast rooted at `rank`.
            text = broadcast_string(text, root=rank, comm=self.comm)
            all_texts.append((rank, text))
        reports.append((test, all_texts))
def search_for_file(name, world=None):
    """Traverse gpaw setup paths to find file.

    Returns the file path and file contents.  If the file is not found,
    contents will be None.

    When a communicator ``world`` with more than one rank is given, only
    rank 0 touches the filesystem; the result is broadcast so that every
    rank returns the same (filename, source) pair.
    """
    if world is not None and world.size > 1:
        if world.rank == 0:
            filename, source = search_for_file(name)
            # Encode the optional values as plain strings; '' stands in
            # for None on the wire.  (The original crashed with a
            # TypeError here when filename was None, i.e. when
            # setup_paths was empty.)
            string = (filename or '') + '|' + (source or '')
        else:
            string = None
        # maxsplit=1 keeps any '|' occurring in the file contents intact.
        filename, source = broadcast_string(string, 0, world).split('|', 1)
        if filename == '':
            filename = None
        if source == '':
            source = None
        return filename, source

    source = None
    filename = None
    for path in setup_paths:
        filename = os.path.join(path, name)
        if os.path.isfile(filename):
            # Close the handle promptly instead of leaking it.
            with open(filename) as fd:
                source = fd.read()
            break
        else:
            # Fall back to a gzip-compressed copy of the file.
            filename += '.gz'
            if os.path.isfile(filename):
                if has_gzip:
                    with gzip.open(filename) as fd:
                        source = fd.read()
                else:
                    # No gzip module available: shell out to gunzip.
                    source = os.popen('gunzip -c ' + filename, 'r').read()
                break
    return filename, source
def search_for_file(name, world=None):
    """Traverse gpaw setup paths to find file.

    Returns the file path and file contents.  If the file is not found,
    contents will be None.
    """
    # Parallel case: rank 0 performs the lookup, everyone gets the result.
    if world is not None and world.size > 1:
        if world.rank == 0:
            fname, text = search_for_file(name)
            if text is None:
                text = ""
            payload = fname + "|" + text
        else:
            payload = None
        fname, text = broadcast_string(payload, 0, world).split("|", 1)
        return fname, (text if text != "" else None)

    # Serial case: probe each setup path for the plain file, then for a
    # gzip-compressed variant.
    fname = None
    for directory in setup_paths:
        fname = os.path.join(directory, name)
        if os.path.isfile(fname):
            return fname, open(fname).read()
        fname += ".gz"
        if os.path.isfile(fname):
            if has_gzip:
                return fname, gzip.open(fname).read()
            return fname, os.popen("gunzip -c " + fname, "r").read()
    # Not found: fname is the last candidate tried (or None if there
    # were no setup paths), contents are None.
    return fname, None
def gather_ndarray_dict(data, comm, broadcast=False):
    """Gather a dict of numpy arrays from every rank onto the master.

    Each rank contributes a dict mapping names to arrays; shapes may
    differ between ranks.  Rank 0 collects all entries into ``all_data``.
    With ``broadcast=True`` the merged dict is afterwards broadcast so
    every rank returns it; otherwise non-master ranks return an empty
    dict.

    NOTE(review): entry names are presumably unique across ranks --
    a duplicate name would silently overwrite an earlier entry.
    """
    all_data = {}
    # Number of dict entries contributed by each rank.
    data_len = np.zeros([comm.size], int)
    data_len[comm.rank] = len(data)
    comm.sum(data_len)
    # One metadata row per entry, grouped contiguously by rank:
    # [ndim, owning rank, index into dtypes].
    info = np.zeros([np.sum(data_len), 3], int)
    dtypes = [int, float, complex]
    for i, name in enumerate(data):
        # Rows for this rank start after all lower ranks' rows.
        base = np.sum(data_len[:comm.rank])
        info[base + i, 0] = len(data[name].shape)
        info[base + i, 1] = comm.rank
        info[base + i, 2] = dtypes.index(data[name].dtype)
    comm.sum(info)
    if comm.rank == 0:
        # Master keeps its own entries ...
        for name in data:
            all_data[name] = data[name]
        # ... and receives (name, shape, payload) from every other rank,
        # using the summed metadata to size the receive buffers.
        for i in range(1, comm.size):
            base = np.sum(data_len[:i])
            for j in range(data_len[i]):
                shape = np.zeros([info[base + j, 0]], int)
                dtype = dtypes[info[base + j, 2]]
                name = receive_string(i, comm)
                comm.receive(shape, i, 123)  # tag 123: shape vector
                tmp = np.zeros(shape, dtype)
                comm.receive(tmp, i, 546)  # tag 546: array payload
                all_data[name] = tmp
    else:
        # Non-master ranks send each entry as (name, shape, data),
        # synchronously, in local dict order.
        for name in data:
            send_string(name, 0, comm)
            shape = np.array(data[name].shape, int)
            comm.ssend(shape, 0, 123)
            comm.ssend(data[name], 0, 546)
    if broadcast:
        num = np.zeros([1], int)
        if comm.rank == 0:
            num[0] = len(all_data)
            comm.broadcast(num, 0)
            for name in all_data:
                broadcast_string(name, 0, comm)
                shape = np.array(all_data[name].shape, int)
                comm.broadcast(shape, 0)
                comm.broadcast(all_data[name], 0)
        else:
            comm.broadcast(num, 0)
            # NOTE(review): info rows are grouped by owning rank, while
            # all_data on the master holds its own entries followed by
            # the received ones; indexing info[i] here is only correct if
            # those two orderings agree -- verify against callers.
            for i in range(num):
                name = broadcast_string(None, 0, comm)
                shape = np.zeros([info[i, 0]], int)
                dtype = dtypes[info[i, 2]]
                comm.broadcast(shape, 0)
                tmp = np.zeros(shape, dtype)
                comm.broadcast(tmp, 0)
                all_data[name] = tmp
    return all_data
def main(args=None):
    """Command-line entry point for the GPAW test-suite runner.

    Parses options, selects the tests to run, prepares a temporary
    working directory shared by all MPI ranks, runs the suite and
    returns the number of failed tests.

    Parameters
    ----------
    args: list of str or None
        Argument list to parse; None means sys.argv[1:].
    """
    import shutil  # local import: only needed for the cleanup below

    description = ('Run the GPAW test suite. The test suite can be run in '
                   'parallel with MPI through gpaw-python. The test suite '
                   'supports 1, 2, 4 or 8 CPUs although some tests are '
                   'skipped for some parallelizations. If no TESTs are '
                   'given, run all tests supporting the parallelization.')

    parser = OptionParser(usage='%prog [OPTION...] [TEST...]',
                          description=description,
                          version='%%prog %s' % version)
    parser.add_option('-x', '--exclude', type='string', default=None,
                      help='Exclude tests (comma separated list of tests).',
                      metavar='test1.py,test2.py,...')
    parser.add_option('-f', '--run-failed-tests-only', action='store_true',
                      help='Run failed tests only.')
    parser.add_option('--from', metavar='TESTFILE', dest='from_test',
                      help='Run remaining tests, starting from TESTFILE')
    parser.add_option('--after', metavar='TESTFILE', dest='after_test',
                      help='Run remaining tests, starting after TESTFILE')
    parser.add_option('--range', type='string', default=None,
                      help='Run tests in range test_i.py to test_j.py '
                      '(inclusive)',
                      metavar='test_i.py,test_j.py')
    parser.add_option('-j', '--jobs', type='int', default=1,
                      help='Run JOBS threads. Each test will be executed '
                      'in serial by one thread. This option cannot be used '
                      'for parallelization together with MPI.')
    parser.add_option('--reverse', action='store_true',
                      help=('Run tests in reverse order (less overhead with '
                            'multiple jobs)'))
    parser.add_option('-k', '--keep-temp-dir', action='store_true',
                      dest='keep_tmpdir',
                      help='Do not delete temporary files.')
    parser.add_option('-d', '--directory', help='Run test in this directory')
    parser.add_option('-s', '--show-output', action='store_true',
                      help='Show standard output from tests.')

    opt, tests = parser.parse_args(args)

    if len(tests) == 0:
        # No explicit tests given: run the full suite.
        from gpaw.test import tests

    if opt.reverse:
        tests.reverse()

    if opt.run_failed_tests_only:
        # Close the file handle instead of leaking it.
        with open('failed-tests.txt') as fd:
            tests = [line.strip() for line in fd]

    exclude = []
    if opt.exclude is not None:
        exclude += opt.exclude.split(',')

    if opt.from_test:
        fromindex = tests.index(opt.from_test)
        tests = tests[fromindex:]
    if opt.after_test:
        index = tests.index(opt.after_test) + 1
        tests = tests[index:]

    if opt.range:
        # default start(stop) index is first(last) test
        indices = opt.range.split(',')
        try:
            start_index = tests.index(indices[0])
        except ValueError:
            start_index = 0
        try:
            stop_index = tests.index(indices[1]) + 1
        except ValueError:
            stop_index = len(tests)
        tests = tests[start_index:stop_index]

    if opt.jobs > 1:
        # maxrss.py measures process-wide memory and is meaningless when
        # several tests share one process.
        exclude.append('maxrss.py')

    for test in exclude:
        if test in tests:
            tests.remove(test)

    from gpaw.test import TestRunner

    if mpi.world.size > 8:
        if mpi.rank == 0:
            message = ('!!!!!!!\n'
                       'GPAW regression test suite was not designed to run '
                       'on more\n'
                       'than 8 MPI tasks. Re-run test suite using 1, 2, 4 '
                       'or 8 MPI\n'
                       'tasks instead.')
            warnings.warn(message, RuntimeWarning)

    # Rank 0 creates (or reuses) the working directory and shares its
    # name with the other ranks.
    if mpi.rank == 0:
        if opt.directory is None:
            tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
        else:
            tmpdir = opt.directory
            if os.path.isdir(tmpdir):
                # Never delete a pre-existing user directory afterwards.
                opt.keep_tmpdir = True
            else:
                os.mkdir(tmpdir)
    else:
        tmpdir = None
    tmpdir = mpi.broadcast_string(tmpdir)

    cwd = os.getcwd()
    os.chdir(tmpdir)
    operating_system = platform.system() + ' ' + platform.machine()
    operating_system += ' ' + ' '.join(platform.dist())
    python = platform.python_version() + ' ' + platform.python_compiler()
    python += ' ' + ' '.join(platform.architecture())
    if mpi.rank == 0:
        print('python %s on %s' % (python, operating_system))
        print('Running tests in %s' % tmpdir)
        print('Jobs: %d, Cores: %d, debug-mode: %r' %
              (opt.jobs, mpi.size, debug))
    failed = TestRunner(tests, jobs=opt.jobs,
                        show_output=opt.show_output).run()
    os.chdir(cwd)
    if mpi.rank == 0:
        if len(failed) > 0:
            # Remember failures so --run-failed-tests-only can rerun them.
            with open('failed-tests.txt', 'w') as fd:
                fd.write('\n'.join(failed) + '\n')
        elif not opt.keep_tmpdir:
            # shutil.rmtree copes with unusual characters in the path and
            # avoids spawning a shell (was: os.system('rm -rf ' + tmpdir)).
            shutil.rmtree(tmpdir, ignore_errors=True)
    return len(failed)
def run():
    """Parse command-line options and run the GPAW test suite.

    Test hooks are disabled for the duration of the run and restored
    before returning.  Returns the number of failed tests.
    """
    parser = OptionParser(usage='%prog [options] [tests]',
                          version='%prog 0.1')
    parser.add_option('-x', '--exclude', type='string', default=None,
                      help='Exclude tests (comma separated list of tests).',
                      metavar='test1.py,test2.py,...')
    parser.add_option('-f', '--run-failed-tests-only', action='store_true',
                      help='Run failed tests only.')
    parser.add_option('--from', metavar='TESTFILE', dest='from_test',
                      help='Run remaining tests, starting from TESTFILE')
    parser.add_option('--after', metavar='TESTFILE', dest='after_test',
                      help='Run remaining tests, starting after TESTFILE')
    parser.add_option('--range', type='string', default=None,
                      help='Run tests in range test_i.py to test_j.py ' +
                      '(inclusive)',
                      metavar='test_i.py,test_j.py')
    parser.add_option('-j', '--jobs', type='int', default=1,
                      help='Run JOBS threads.')
    parser.add_option('--reverse', action='store_true',
                      help=('Run tests in reverse order (less overhead with '
                            'multiple jobs)'))
    parser.add_option('-k', '--keep-temp-dir', action='store_true',
                      dest='keep_tmpdir',
                      help='Do not delete temporary files.')
    parser.add_option('-d', '--directory', help='Run test in this directory')
    parser.add_option('-s', '--show-output', action='store_true',
                      help='Show standard output from tests.')

    opt, tests = parser.parse_args()

    if len(tests) == 0:
        # No explicit tests given: run the full suite.
        from gpaw.test import tests

    if opt.reverse:
        tests.reverse()

    if opt.run_failed_tests_only:
        tests = [line.strip() for line in open('failed-tests.txt')]

    exclude = []
    if opt.exclude is not None:
        exclude += opt.exclude.split(',')

    if opt.from_test:
        fromindex = tests.index(opt.from_test)
        tests = tests[fromindex:]
    if opt.after_test:
        index = tests.index(opt.after_test) + 1
        tests = tests[index:]

    if opt.range:
        # default start(stop) index is first(last) test
        indices = opt.range.split(',')
        try:
            start_index = tests.index(indices[0])
        except ValueError:
            start_index = 0
        try:
            stop_index = tests.index(indices[1]) + 1
        except ValueError:
            stop_index = len(tests)
        tests = tests[start_index:stop_index]

    if opt.jobs > 1:
        # maxrss.py is not meaningful when several tests share a process.
        exclude.append('maxrss.py')

    for test in exclude:
        if test in tests:
            tests.remove(test)

    from gpaw.test import TestRunner

    if mpi.world.size > 8:
        if mpi.rank == 0:
            message = '!!!!!!!\n' \
                'GPAW regression test suite was not designed to run on more\n' \
                'than 8 MPI tasks. Re-run test suite using 1, 2, 4 or 8 MPI\n' \
                'tasks instead.'
            warnings.warn(message, RuntimeWarning)

    # Disable hooks while the suite runs; restored before returning.
    old_hooks = hooks.copy()
    hooks.clear()

    # Rank 0 creates (or reuses) the working directory and shares its
    # name with the other ranks.
    if mpi.rank == 0:
        if opt.directory is None:
            tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
        else:
            tmpdir = opt.directory
            if os.path.isdir(tmpdir):
                # A pre-existing directory is never deleted afterwards.
                opt.keep_tmpdir = True
            else:
                os.mkdir(tmpdir)
    else:
        tmpdir = None
    tmpdir = mpi.broadcast_string(tmpdir)

    cwd = os.getcwd()
    os.chdir(tmpdir)
    operating_system = platform.system() + ' ' + platform.machine()
    operating_system += ' ' + ' '.join(platform.dist())
    python = platform.python_version() + ' ' + platform.python_compiler()
    python += ' ' + ' '.join(platform.architecture())
    if mpi.rank == 0:
        print('python %s on %s' % (python, operating_system))
        print('Running tests in %s' % tmpdir)
        print('Jobs: %d, Cores: %d, debug-mode: %r' % (opt.jobs, mpi.size,
                                                       debug))
    failed = TestRunner(tests, jobs=opt.jobs,
                        show_output=opt.show_output).run()
    os.chdir(cwd)
    if mpi.rank == 0:
        if len(failed) > 0:
            # Remember failures so --run-failed-tests-only can rerun them.
            open('failed-tests.txt', 'w').write('\n'.join(failed) + '\n')
        elif not opt.keep_tmpdir:
            os.system('rm -rf ' + tmpdir)
    hooks.update(old_hooks.items())
    return len(failed)
# NOTE(review): this chunk appears to be the tail of an older revision of
# the test-runner function -- it references names ('opt', 'tests', 'hooks',
# 'mpi') that are not defined here, and uses the Python 2 print statement.
from gpaw.test import TestRunner

# Disable hooks while the suite runs; restored at the end.
old_hooks = hooks.copy()
hooks.clear()
# Rank 0 creates (or reuses) the working directory and shares its name.
if mpi.rank == 0:
    if opt.directory is None:
        tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
    else:
        tmpdir = opt.directory
        if os.path.isdir(tmpdir):
            # A pre-existing directory is never deleted afterwards.
            opt.keep_tmpdir = True
        else:
            os.mkdir(tmpdir)
else:
    tmpdir = None
tmpdir = mpi.broadcast_string(tmpdir)
cwd = os.getcwd()
os.chdir(tmpdir)
if mpi.rank == 0:
    print 'Running tests in', tmpdir
failed = TestRunner(tests, jobs=opt.jobs,
                    show_output=opt.show_output).run()
os.chdir(cwd)
if mpi.rank == 0:
    if len(failed) > 0:
        # Remember failures so --run-failed-tests-only can rerun them.
        open('failed-tests.txt', 'w').write('\n'.join(failed) + '\n')
    elif not opt.keep_tmpdir:
        os.system('rm -rf ' + tmpdir)
hooks.update(old_hooks.items())
def run():
    """Parse command-line options and run the GPAW test suite.

    Test hooks are disabled for the duration of the run and restored
    before returning.  Returns the number of failed tests.
    """
    description = (
        "Run the GPAW test suite. The test suite can be run in "
        "parallel with MPI through gpaw-python. The test suite "
        "supports 1, 2, 4 or 8 CPUs although some tests are "
        "skipped for some parallelizations. If no TESTs are "
        "given, run all tests supporting the parallelization."
    )

    parser = OptionParser(usage="%prog [OPTION...] [TEST...]",
                          description=description,
                          version="%%prog %s" % version)
    parser.add_option(
        "-x",
        "--exclude",
        type="string",
        default=None,
        help="Exclude tests (comma separated list of tests).",
        metavar="test1.py,test2.py,...",
    )
    parser.add_option("-f", "--run-failed-tests-only", action="store_true",
                      help="Run failed tests only.")
    parser.add_option(
        "--from", metavar="TESTFILE", dest="from_test",
        help="Run remaining tests, starting from TESTFILE"
    )
    parser.add_option(
        "--after", metavar="TESTFILE", dest="after_test",
        help="Run remaining tests, starting after TESTFILE"
    )
    parser.add_option(
        "--range",
        type="string",
        default=None,
        help="Run tests in range test_i.py to test_j.py " "(inclusive)",
        metavar="test_i.py,test_j.py",
    )
    parser.add_option(
        "-j",
        "--jobs",
        type="int",
        default=1,
        help="Run JOBS threads. Each test will be executed "
        "in serial by one thread. This option cannot be used "
        "for parallelization together with MPI.",
    )
    parser.add_option(
        "--reverse", action="store_true",
        help=("Run tests in reverse order (less overhead with "
              "multiple jobs)")
    )
    parser.add_option(
        "-k", "--keep-temp-dir", action="store_true", dest="keep_tmpdir",
        help="Do not delete temporary files."
    )
    parser.add_option("-d", "--directory", help="Run test in this directory")
    parser.add_option("-s", "--show-output", action="store_true",
                      help="Show standard output from tests.")

    opt, tests = parser.parse_args()

    if len(tests) == 0:
        # No explicit tests given: run the full suite.
        from gpaw.test import tests

    if opt.reverse:
        tests.reverse()

    if opt.run_failed_tests_only:
        tests = [line.strip() for line in open("failed-tests.txt")]

    exclude = []
    if opt.exclude is not None:
        exclude += opt.exclude.split(",")

    if opt.from_test:
        fromindex = tests.index(opt.from_test)
        tests = tests[fromindex:]
    if opt.after_test:
        index = tests.index(opt.after_test) + 1
        tests = tests[index:]

    if opt.range:
        # default start(stop) index is first(last) test
        indices = opt.range.split(",")
        try:
            start_index = tests.index(indices[0])
        except ValueError:
            start_index = 0
        try:
            stop_index = tests.index(indices[1]) + 1
        except ValueError:
            stop_index = len(tests)
        tests = tests[start_index:stop_index]

    if opt.jobs > 1:
        # maxrss.py is not meaningful when several tests share a process.
        exclude.append("maxrss.py")

    for test in exclude:
        if test in tests:
            tests.remove(test)

    from gpaw.test import TestRunner

    if mpi.world.size > 8:
        if mpi.rank == 0:
            message = (
                "!!!!!!!\n"
                "GPAW regression test suite was not designed to run on more\n"
                "than 8 MPI tasks. Re-run test suite using 1, 2, 4 or 8 MPI\n"
                "tasks instead."
            )
            warnings.warn(message, RuntimeWarning)

    # Disable hooks while the suite runs; restored before returning.
    old_hooks = hooks.copy()
    hooks.clear()

    # Rank 0 creates (or reuses) the working directory and shares its
    # name with the other ranks.
    if mpi.rank == 0:
        if opt.directory is None:
            tmpdir = tempfile.mkdtemp(prefix="gpaw-test-")
        else:
            tmpdir = opt.directory
            if os.path.isdir(tmpdir):
                # A pre-existing directory is never deleted afterwards.
                opt.keep_tmpdir = True
            else:
                os.mkdir(tmpdir)
    else:
        tmpdir = None
    tmpdir = mpi.broadcast_string(tmpdir)

    cwd = os.getcwd()
    os.chdir(tmpdir)
    operating_system = platform.system() + " " + platform.machine()
    operating_system += " " + " ".join(platform.dist())
    python = platform.python_version() + " " + platform.python_compiler()
    python += " " + " ".join(platform.architecture())
    if mpi.rank == 0:
        print("python %s on %s" % (python, operating_system))
        print("Running tests in %s" % tmpdir)
        print("Jobs: %d, Cores: %d, debug-mode: %r" % (opt.jobs, mpi.size,
                                                       debug))
    failed = TestRunner(tests, jobs=opt.jobs,
                        show_output=opt.show_output).run()
    os.chdir(cwd)
    if mpi.rank == 0:
        if len(failed) > 0:
            # Remember failures so --run-failed-tests-only can rerun them.
            open("failed-tests.txt", "w").write("\n".join(failed) + "\n")
        elif not opt.keep_tmpdir:
            os.system("rm -rf " + tmpdir)
    hooks.update(old_hooks.items())
    return len(failed)
def main(args):
    """Run the GPAW test suite for an already-parsed argument namespace.

    Parameters
    ----------
    args: namespace with the runner options (tests, list, reverse,
        run_failed_tests_only, exclude, from_test, after_test, range,
        jobs, directory, keep_tmpdir, show_output).

    Returns the list of failed test names (empty on full success).
    """
    import shutil  # local import: only needed for the cleanup below

    if len(args.tests) == 0:
        # No explicit tests given: run the full suite.
        from gpaw.test import tests
    else:
        tests = args.tests

    if args.list:
        # Just print the absolute path of each selected test and exit.
        mydir, _ = os.path.split(__file__)
        for test in tests:
            print(os.path.join(mydir, test))
        return

    if args.reverse:
        tests.reverse()

    if args.run_failed_tests_only:
        # Close the file handle instead of leaking it.
        with open('failed-tests.txt') as fd:
            tests = [line.strip() for line in fd]

    exclude = []
    if args.exclude is not None:
        exclude += args.exclude.split(',')

    if args.from_test:
        fromindex = tests.index(args.from_test)
        tests = tests[fromindex:]
    if args.after_test:
        index = tests.index(args.after_test) + 1
        tests = tests[index:]

    if args.range:
        # default start(stop) index is first(last) test
        indices = args.range.split(',')
        try:
            start_index = tests.index(indices[0])
        except ValueError:
            start_index = 0
        try:
            stop_index = tests.index(indices[1]) + 1
        except ValueError:
            stop_index = len(tests)
        tests = tests[start_index:stop_index]

    if args.jobs > 1:
        # maxrss.py measures process-wide memory and is meaningless when
        # several tests share one process.
        exclude.append('maxrss.py')

    for test in exclude:
        if test in tests:
            tests.remove(test)

    from gpaw.test import TestRunner

    if mpi.world.size > 8:
        if mpi.rank == 0:
            message = (
                '!!!!!!!\n'
                'GPAW regression test suite was not designed to run on more\n'
                'than 8 MPI tasks. Re-run test suite using 1, 2, 4 or 8 MPI\n'
                'tasks instead.')
            warnings.warn(message, RuntimeWarning)

    # Rank 0 creates (or reuses) the working directory and shares its
    # name with the other ranks.
    if mpi.rank == 0:
        if args.directory is None:
            tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
        else:
            tmpdir = args.directory
            if os.path.isdir(tmpdir):
                # Never delete a pre-existing user directory afterwards.
                args.keep_tmpdir = True
            else:
                os.mkdir(tmpdir)
    else:
        tmpdir = None
    tmpdir = mpi.broadcast_string(tmpdir)

    cwd = os.getcwd()
    os.chdir(tmpdir)
    if mpi.rank == 0:
        info()
        print('Running tests in', tmpdir)
        print('Jobs: {}, Cores: {}, debug-mode: {}'
              .format(args.jobs, mpi.size, debug))
    failed = TestRunner(tests, jobs=args.jobs,
                        show_output=args.show_output).run()
    os.chdir(cwd)
    mpi.world.barrier()  # synchronize before removing tmpdir
    if mpi.rank == 0:
        if len(failed) > 0:
            # Remember failures so --run-failed-tests-only can rerun them.
            with open('failed-tests.txt', 'w') as fd:
                fd.write('\n'.join(failed) + '\n')
        if not args.keep_tmpdir:
            # shutil.rmtree copes with unusual characters in the path and
            # avoids spawning a shell (was: os.system('rm -rf ' + tmpdir)).
            shutil.rmtree(tmpdir, ignore_errors=True)
    return failed