def run_tests(self, tests, nproc=9, parallel=True, interactive=False, name=None):
    """run tests. By default, we run them in parallel, unless serial
    is selected.

    Arguments:
      - tests (dict) : a lookup of test names to task objects
      - parallel (bool) : run tasks in parallel that are able (default is True)
      - nproc (int) : number of processes to run
      - name (str) : the name of a test to interact with
      - interactive (bool) : run jobs interactively (for debugging),
        not available for parallel jobs.
    """
    # Parallel tests cannot be interactive
    if parallel and not interactive:
        self._run_parallel(tests, nproc=nproc)

    else:
        total = len(tests)
        progress = 1

        for _, task in tests.items():
            prefix = "[%s:%s/%s]" % (task.name, progress, total)
            if self.show_progress:
                bot.show_progress(progress, total, length=35, prefix=prefix)
            else:
                bot.info("Running %s" % prefix)

            # Should this be interactive?
            is_interactive = interactive
            if name is not None and interactive:
                if not task.name.startswith(name):
                    is_interactive = False

            # Run the task, update results with finished object
            task.run(interactive=is_interactive)
            progress += 1

    return tests
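
# A minimal sketch (not part of gridtest) of how the serial branch above
# dispatches: `tests` is a dict of name -> task, and when a `name` filter is
# given, only tasks whose name starts with it stay interactive. DummyTask and
# the dict literal below are hypothetical stand-ins for gridtest task objects.

class DummyTask:
    def __init__(self, name):
        self.name = name

    def run(self, interactive=False):
        print("running %s (interactive=%s)" % (self.name, interactive))

tests = {t: DummyTask(t) for t in ["mod.func.0", "mod.func.1", "other.func.0"]}
for _, task in tests.items():
    is_interactive = True
    if not task.name.startswith("mod.func"):
        is_interactive = False
    task.run(interactive=is_interactive)
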
def run(self, interactive=False, cleanup=None):
    """run an isolated test, and store the return code and result with
    the tester here.

    Arguments:
      - interactive (bool) : run interactively for shell to debug
      - cleanup (bool) : remove any temporary directories or files (True)
    """
    if not self.show_progress:
        bot.info(f"Running test {self.name}")

    # Should we clean up temporary files produced?
    if cleanup is not None:
        self.cleanup_temp = cleanup

    # [passed, result, out, err, raises]
    passed, result, out, err, raises = test_basic(
        funcname=self.get_funcname(),
        module=self.module,
        func=self.func,
        filename=self.filename,
        metrics=self.params.get("metrics", []),
        args=self.params.get("args", {}),
        returns=self.params.get("returns"),
        interactive=interactive,
    )

    self.success = passed
    self.result = result
    self.out = out
    self.err = err
    self.raises = raises

    # Finish by checking output
    self.check_output()
    if self.cleanup_temp:
        self.cleanup()
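
# A sketch of the contract assumed above, for illustration only: test_basic
# returns a 5-tuple [passed, result, out, err, raises], where out/err hold
# captured stdout/stderr and raises names any exception raised. The
# fake_test_basic below is a hypothetical stand-in, not the real function.

def fake_test_basic(**kwargs):
    try:
        result = sum(kwargs.get("args", {}).values())
        return True, result, ["captured stdout"], [], None
    except Exception as exc:
        return False, None, [], [str(exc)], type(exc).__name__

passed, result, out, err, raises = fake_test_basic(args={"a": 1, "b": 2})
assert passed and result == 3
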
# Requires the stdlib imports below at the top of the module; the gridtest
# helpers (bot, init_worker, multi_wrapper, multi_package, test_basic) are
# imported elsewhere in the package.
import multiprocessing
import os
import sys


def run(self, tests, cleanup=True):
    """run will execute a test for each entry in the list of tests.
    The result of the test, and error codes, are saved with the test.

    Arguments:
      - tests (gridtest.main.test.GridTest) : the GridTest object
      - cleanup (bool) : remove any temporary directories or files (True)
    """
    # Cut out early if no tests
    if not tests:
        return

    # Keep track of some progress for the user
    total = len(tests)
    progress = 1

    results = []

    try:
        prefix = "[%s/%s]" % (progress, total)
        if self.show_progress:
            bot.show_progress(0, total, length=35, prefix=prefix)

        pool = multiprocessing.Pool(self.workers, init_worker)
        self.start()

        for name, task in tests.items():

            # If a class is returned, needs to be in path too
            sys.path.insert(0, os.path.dirname(task.filename))

            # Get the function name from the tester
            params = {
                "funcname": task.get_funcname(),
                "module": task.module,
                "filename": task.filename,
                "metrics": task.params.get("metrics", []),
                "args": task.params.get("args", {}),
                "returns": task.params.get("returns"),
            }
            if not self.show_progress:
                bot.info(f"Running test {name}")

            result = pool.apply_async(
                multi_wrapper, multi_package(test_basic, [params])
            )

            # Store the test with the result
            results.append((task, result))

        while results:
            test, result = results.pop()
            result.wait()
            if self.show_progress:
                bot.show_progress(progress, total, length=35, prefix=prefix)
            progress += 1
            prefix = "[%s/%s]" % (progress, total)

            # Update the task with the result:
            # [passed, result, out, err, raises]
            passed, res, out, err, raises = result.get()
            test.out = out
            test.err = err
            test.success = passed
            test.result = res
            test.raises = raises

        self.end()
        pool.close()
        pool.join()

    except (KeyboardInterrupt, SystemExit):
        bot.error("Keyboard interrupt detected, terminating workers!")
        pool.terminate()
        sys.exit(1)

    except Exception:
        bot.exit("Error running task.")
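
# The pool pattern above, in isolation: submit everything with apply_async,
# then drain the AsyncResults. This standalone sketch (with a hypothetical
# worker) is safe to run as a script; .wait() blocks until a result is ready,
# and .get() re-raises any exception from the worker. The pop-based drain
# mirrors the LIFO consumption in run above.

import multiprocessing

def square(x):
    return x * x

if __name__ == "__main__":
    pool = multiprocessing.Pool(2)
    results = [(n, pool.apply_async(square, (n,))) for n in range(5)]
    while results:
        n, result = results.pop()
        result.wait()
        print(n, "->", result.get())
    pool.close()
    pool.join()
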
def save(self, testfile):
    """Save the runner.config to an output yaml file."""
    bot.info(f"Writing {self} to {testfile}")
    write_yaml(self.config, testfile)
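
# write_yaml is a gridtest helper not shown here; a minimal sketch of what
# such a wrapper typically looks like, assuming pyyaml is installed (the
# real helper may differ):

import yaml

def write_yaml(yaml_dict, filename, mode="w"):
    # Serialize the dict to the given file with the safe dumper
    with open(filename, mode) as fd:
        yaml.safe_dump(yaml_dict, fd)
    return filename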