def wait_tests(self, working_dir: Path, timeout=5):
    """Wait on all the tests under the given path to complete.

    :param working_dir: The path to a working directory.
    :param timeout: How long to wait (in seconds) before giving up.
    :raises TimeoutError: If some tests are still incomplete when the
        timeout expires.
    """

    def is_complete(path: Path):
        """Return True if test is complete."""
        return (path / TestRun.COMPLETE_FN).exists()

    runs_dir = working_dir / 'test_runs'

    end_time = time.time() + timeout

    while time.time() < end_time:
        completed = [
            is_complete(test)
            for test in dir_db.select(runs_dir).paths
        ]

        if not completed:
            self.fail("No tests started.")

        if all(completed):
            break
        else:
            time.sleep(0.1)
            continue
    else:
        # Fix: the original passed a bare generator to str.format (which
        # rendered as a generator repr) and listed the tests that HAD
        # completed.  Report the still-incomplete test names instead.
        incomplete = [
            test.name
            for test in dir_db.select(runs_dir).paths
            if not is_complete(test)
        ]
        raise TimeoutError("Waiting on tests: {}".format(incomplete))
def test_spec_perms(self):
    """Check that test specific permissions work."""

    env = os.environ.copy()
    env['PAV_CONFIG_DIR'] = self.config_dir.as_posix()

    cmd = [(self.PAV_ROOT_DIR / 'bin/pav').as_posix(), 'run', 'perm.*']

    proc = sp.Popen(cmd, env=env, stdout=sp.PIPE, stderr=sp.STDOUT)
    try:
        if proc.wait(10) != 0:
            out = proc.stdout.read()
            out = out.decode()
            self.fail("Error running command.\n{}".format(out))
    except sp.TimeoutExpired:
        # Fix: fail with a message so timeouts are distinguishable in
        # test output (the original called self.fail() bare).
        self.fail("Timed out waiting for 'pav run' to finish.")

    self.wait_tests(self.working_dir)

    # Expected (group, umask) per test name, keyed by the test's
    # 'name' attribute in its saved config.
    perms = {
        'base': (grp.getgrgid(os.getgid()), 0o007),
        'spec_perms1': (self.alt_group, 0o022),
        'spec_perms2': (self.alt_group2, 0o002),
    }

    for test_path in dir_db.select(self.working_dir / 'test_runs')[0]:
        with (test_path / 'config').open() as config_file:
            # Fix: yaml.load without an explicit Loader is deprecated and
            # can construct arbitrary objects; safe_load is sufficient for
            # plain config data.
            test_config = yaml.safe_load(config_file)

        name = test_config['name']
        group, umask = perms[name]
        self.check_perms(test_path, group, umask)
def from_id(cls, pav_cfg, sid: str):
    """Load a series object from the given id, along with all of its
    associated tests.

    :param pav_cfg: The Pavilion configuration.
    :param sid: The series id string.
    :raises TestSeriesError: From invalid series id or path.
    """

    sid = cls.sid_to_id(sid)

    series_path = pav_cfg.working_dir / 'series'
    series_path = dir_db.make_id_path(series_path, sid)

    if not series_path.exists():
        raise TestSeriesError("No such series found: '{}' at '{}'".format(
            sid, series_path))

    logger = logging.getLogger(cls.LOGGER_FMT.format(sid))

    tests = []
    # Fix: iterate the select result's '.paths' attribute, as every other
    # dir_db.select caller in this codebase does, rather than the result
    # object itself.
    for path in dir_db.select(series_path).paths:
        try:
            test_id = int(path.name)
        except ValueError:
            logger.info("Bad test id in series from dir '%s'", path)
            continue

        try:
            tests.append(TestRun.load(pav_cfg, test_id=test_id))
        except TestRunError as err:
            logger.info("Error loading test %s: %s", test_id, err.args[0])

    return cls(pav_cfg, tests, _id=sid)
def list_series_tests(pav_cfg, sid: str):
    """Return a list of paths to test run directories for the given
    series id.

    :raises TestSeriesError: If the series doesn't exist."""

    series_path = path_from_id(pav_cfg, sid)

    if series_path.exists():
        return dir_db.select(series_path).paths

    raise TestSeriesError(
        "No such test series '{}'. Looked in {}.".format(sid, series_path))
def _series_cmd(self, pav_cfg, args):
    """Print info on each series."""

    series_attrs = {key: SeriesInfo.attr_doc(key)
                    for key in SeriesInfo.list_attrs()}

    # With --show-fields, just list the available fields and bail.
    if args.show_fields:
        for field, doc in series_attrs.items():
            output.fprint(field, '-', doc, file=self.outfile)
        return 0

    fields, mode = self.get_fields(
        fields_arg=args.out_fields,
        mode_arg=args.output_mode,
        default_single_field='sid',
        default_fields=self.SERIES_LONG_FIELDS,
        avail_fields=list(series_attrs.keys()),
    )

    # Build the filter and sort options from the command arguments.
    series_filter = filters.make_series_filter(
        complete=args.complete,
        incomplete=args.incomplete,
        newer_than=args.newer_than,
        older_than=args.older_than,
        sys_name=args.sys_name)

    series_order, ascending = filters.get_sort_opts(
        sort_name=args.sort_by,
        choices=filters.SERIES_SORT_FUNCS)

    matched = dir_db.select(
        id_dir=pav_cfg.working_dir / 'series',
        filter_func=series_filter,
        transform=SeriesInfo,
        order_func=series_order,
        order_asc=ascending,
    )

    rows = [info.attr_dict() for info in matched]

    self.write_output(
        mode=mode,
        rows=rows,
        fields=fields,
        header=args.header,
        vsep=args.vsep,
        wrap=args.wrap,
    )
def _get_used_build_paths(tests_dir: Path) -> set:
    """Generate a set of all build paths currently used by one or more
    test runs."""

    used_builds = set()

    for run_path in dir_db.select(tests_dir).paths:
        origin_link = run_path / 'build_origin'
        # Only count links that exist, are symlinks, and point somewhere
        # that still resolves to a real path.
        if (origin_link.exists()
                and origin_link.is_symlink()
                and utils.resolve_path(origin_link).exists()):
            used_builds.add(origin_link.resolve().name)

    return used_builds
def from_id(cls, pav_cfg, sid: str,
            outfile: TextIO = None,
            errfile: TextIO = None):
    """Load a series object from the given id, along with all of its
    associated tests.

    :param pav_cfg: The Pavilion configuration.
    :param sid: The series id string.
    :param outfile: Output stream (defaults to a fresh StringIO).
    :param errfile: Error stream (defaults to a fresh StringIO).
    :raises TestSeriesError: From invalid series id or path."""

    # Fix: the defaults were `StringIO()` in the signature, a mutable
    # default argument evaluated once and shared by every call.  Create
    # a fresh buffer per call instead.
    outfile = StringIO() if outfile is None else outfile
    errfile = StringIO() if errfile is None else errfile

    sid = cls.sid_to_id(sid)

    series_path = pav_cfg.working_dir / 'series'
    series_path = dir_db.make_id_path(series_path, sid)

    if not series_path.exists():
        raise TestSeriesError("No such series found: '{}' at '{}'".format(
            sid, series_path))

    logger = logging.getLogger(cls.LOGGER_FMT.format(sid))

    # Non-test files that legitimately live in a series directory.
    series_info_files = [
        SERIES_OUT_FN,
        SERIES_PGID_FN,
        CONFIG_FN,
        DEPENDENCY_FN,
    ]

    tests = []
    for path in dir_db.select(series_path).paths:
        try:
            test_id = int(path.name)
        except ValueError:
            if path.name not in series_info_files:
                logger.info("Bad test id in series from dir '%s'", path)
            continue

        try:
            tests.append(TestRun.load(pav_cfg, test_id=test_id))
        except TestRunError as err:
            logger.info("Error loading test %s: %s", test_id, err.args[0])

    return cls(pav_cfg, tests, _id=sid, outfile=outfile, errfile=errfile)
def get_latest_tests(pav_cfg, limit):
    """Returns ID's of latest test given a limit

    :param pav_cfg: Pavilion config file
    :param int limit: maximum size of list of test ID's
    :return: list of test ID's
    :rtype: list(int)
    """

    # Fix: a non-positive limit should return nothing; the original's
    # `list[-0:]` slice returned the ENTIRE list for limit == 0.
    if limit <= 0:
        return []

    runs_dir = pav_cfg.working_dir / 'test_runs'

    test_dir_list = []
    for test_dir in dir_db.select(runs_dir)[0]:
        # Validate the name first (the original stat'ed every entry, even
        # non-test files, before checking).
        try:
            test_id = int(test_dir.name)
        except ValueError:
            continue

        # The directory may be deleted between listing and stat (race
        # with cleanup) - skip it if so.
        try:
            mtime = test_dir.stat().st_mtime
        except FileNotFoundError:
            continue

        test_dir_list.append((mtime, test_id))

    # Sort by mtime (then id), and keep only the newest `limit` entries.
    test_dir_list.sort()
    return [test_id for _, test_id in test_dir_list[-limit:]]
def delete_unused(tests_dir: Path, builds_dir: Path, verbose: bool = False) \
        -> (int, List[str]):
    """Delete all the build directories, that are unused by any test run.

    :param tests_dir: The test_runs directory path object.
    :param builds_dir: The builds directory path object.
    :param verbose: Print
    :return int count: The number of builds that were removed.
    """

    used_build_paths = _get_used_build_paths(tests_dir)

    def filter_builds(build_path: Path) -> bool:
        """Return whether a build is not used."""
        return build_path.name not in used_build_paths

    removed = 0
    msgs = []

    lock_path = builds_dir.with_suffix('.lock')
    with lockfile.LockFile(lock_path) as lock:
        # Build dirs are named in hex, hence fn_base=16.
        for build_path in dir_db.select(builds_dir, filter_builds,
                                        fn_base=16)[0]:
            # Keep the lock fresh while we work through the list.
            lock.renew()
            try:
                shutil.rmtree(build_path.as_posix())
                build_path.with_suffix(TestBuilder.FINISHED_SUFFIX).unlink()
            except OSError as err:
                msgs.append(
                    "Could not remove build {}: {}".format(build_path, err))
                continue

            removed += 1
            if verbose:
                msgs.append('Removed build {}.'.format(build_path.name))

    return removed, msgs
def _test_runs_cmd(self, pav_cfg, args):
    """List test runs, or show their available fields.

    :param pav_cfg: The Pavilion configuration.
    :param args: The parsed command arguments.
    :return: 0 on --show-fields, errno.EINVAL on a bad series id.
    """

    # With --show-fields, print each attribute's doc and quit.
    if args.show_fields:
        for field in TestAttributes.list_attrs():
            output.fprint(field, '-', TestAttributes.attr_doc(field),
                          file=self.outfile)
        return 0

    fields, mode = self.get_fields(
        fields_arg=args.out_fields,
        mode_arg=args.output_mode,
        default_single_field='id',
        default_fields=self.RUN_LONG_FIELDS,
        avail_fields=TestAttributes.list_attrs())

    filter_func = filters.make_test_run_filter(
        complete=args.complete,
        failed=args.failed,
        incomplete=args.incomplete,
        name=args.name,
        newer_than=args.newer_than,
        older_than=args.older_than,
        passed=args.passed,
        show_skipped=args.show_skipped,
        sys_name=args.sys_name,
        user=args.user,
    )

    order_func, ascending = filters.get_sort_opts(args.sort_by,
                                                  filters.TEST_SORT_FUNCS)

    if args.series:
        # Restrict the search to runs belonging to the given series.
        picked_runs = []
        for series_id in args.series:
            try:
                picked_runs.extend(
                    TestSeries.list_series_tests(pav_cfg=pav_cfg,
                                                 sid=series_id))
            except TestSeriesError as err:
                output.fprint("Invalid test series '{}'.\n{}".format(
                    series_id, err.args[0]),
                    color=output.RED, file=self.errfile)
                return errno.EINVAL

        runs = dir_db.select_from(
            paths=picked_runs,
            transform=TestAttributes,
            filter_func=filter_func,
            order_func=order_func,
            order_asc=ascending,
            limit=args.limit,
        )
    else:
        # Otherwise search the whole test_runs directory.
        runs = dir_db.select(
            id_dir=pav_cfg.working_dir / 'test_runs',
            transform=TestAttributes,
            filter_func=filter_func,
            order_func=order_func,
            order_asc=ascending,
            limit=args.limit,
        )

    rows = [run.attr_dict(include_empty=False) for run in runs]

    self.write_output(
        mode=mode,
        rows=rows,
        fields=fields,
        header=args.header,
        vsep=args.vsep,
        wrap=args.wrap,
    )
def arg_filtered_tests(pav_cfg, args: argparse.Namespace,
                       verbose: TextIO = None) -> List[int]:
    """Search for test runs that match based on the argument values in
    args, and return a list of matching test id's.

    Note: I know this violates the idea that we shouldn't be passing a
    generic object around and just using random bits of an undefined
    interface. BUT:

    1. The interface is well defined, by `filters.add_test_filter_args`.
    2. All of the used bits are *ALWAYS* used, so any errors will pop up
       immediately in unit tests.

    :param pav_cfg: The Pavilion config.
    :param args: An argument namespace with args defined by
        `filters.add_test_filter_args`, plus one additional `tests` argument
        that should contain a list of test id's, series id's, or the 'last'
        keyword.
    :param verbose: A file like object to report test search status.
    :return: A list of test id ints.
    """

    limit = args.limit

    filter_func = filters.make_test_run_filter(
        complete=args.complete,
        incomplete=args.incomplete,
        passed=args.passed,
        failed=args.failed,
        name=args.name,
        user=args.user,
        sys_name=args.sys_name,
        older_than=args.older_than,
        newer_than=args.newer_than,
        show_skipped=args.show_skipped,
    )

    order_func, order_asc = filters.get_sort_opts(
        sort_name=args.sort_by,
        choices=filters.TEST_SORT_FUNCS,
    )

    if not args.tests:
        # No explicit tests given - search the whole test_runs directory.
        found = dir_db.select(
            id_dir=pav_cfg.working_dir / 'test_runs',
            transform=test_run_attr_transform,
            filter_func=filter_func,
            order_func=order_func,
            order_asc=order_asc,
            verbose=verbose,
            limit=limit).data
        return [test['id'] for test in found]

    test_paths = test_list_to_paths(pav_cfg, args.tests)

    if args.disable_filter:
        # Explicit tests with filtering disabled - just convert to ids.
        return dir_db.paths_to_ids(test_paths)

    found = dir_db.select_from(
        paths=test_paths,
        transform=test_run_attr_transform,
        filter_func=filter_func,
        order_func=order_func,
        order_asc=order_asc,
        limit=limit).data
    return [test['id'] for test in found]
def run(self, pav_cfg, args):
    """Run this command.

    Removes old test runs, empty series, and unused builds from the
    working directory, keeping anything newer than the cutoff date or
    still in use.

    :param pav_cfg: The Pavilion configuration.
    :param args: Parsed command args (`older_than`, `all`, `verbose`).
    :return: 0 on success, errno.EINVAL on a bad `--older-than` value.
    :raises commands.CommandError: On a non-numeric `--older-than` count.
    """

    # Default cutoff: anything older than 30 days is removable.
    cutoff_date = datetime.today() - timedelta(days=30)

    if args.older_than:
        args.older_than = args.older_than.split()

        if len(args.older_than) == 2:
            # "<count> <unit>" form, e.g. "3 weeks".
            if not args.older_than[0].isdigit():
                raise commands.CommandError(
                    "Invalid `--older-than` value."
                )
            amount = int(args.older_than[0])
            unit = args.older_than[1]

            if unit in ['minute', 'minutes']:
                cutoff_date = datetime.today() - timedelta(minutes=amount)
            elif unit in ['hour', 'hours']:
                cutoff_date = datetime.today() - timedelta(hours=amount)
            elif unit in ['day', 'days']:
                cutoff_date = datetime.today() - timedelta(days=amount)
            elif unit in ['week', 'weeks']:
                cutoff_date = datetime.today() - timedelta(weeks=amount)
            elif unit in ['month', 'months']:
                # Months are approximated as 30 days.
                cutoff_date = datetime.today() - timedelta(days=30 * amount)

        elif len(args.older_than) == 3:
            # "Mon DD YYYY" form, e.g. "Jan 1 2020".
            date = ' '.join(args.older_than)
            try:
                cutoff_date = datetime.strptime(date, '%b %d %Y')
            except (TypeError, ValueError):
                output.fprint("{} is not a valid date."
                              .format(args.older_than),
                              file=self.errfile, color=output.RED)
                return errno.EINVAL
        else:
            output.fprint(
                "Invalid `--older-than` value.",
                file=self.errfile, color=output.RED
            )
            return errno.EINVAL

    elif args.all:
        # --all: everything is older than "now".
        cutoff_date = datetime.today()

    tests_dir = pav_cfg.working_dir / 'test_runs'   # type: Path
    series_dir = pav_cfg.working_dir / 'series'     # type: Path
    build_dir = pav_cfg.working_dir / 'builds'      # type: Path

    removed_tests = 0
    removed_series = 0
    removed_builds = 0
    used_builds = set()

    # Clean Tests
    output.fprint("Removing Tests...", file=self.outfile,
                  color=output.GREEN)
    for test_path in tests_dir.iterdir():
        test = test_path.name
        try:
            int(test)
        except ValueError:
            # Skip files that aren't numeric
            continue

        # Skip non-directories.
        if not test_path.is_dir():
            continue

        try:
            test_time = datetime.fromtimestamp(test_path.lstat().st_mtime)
        except FileNotFoundError:
            # The file no longer exists. This is a race condition.
            continue

        build_origin_symlink = test_path / 'build_origin'
        # 'None' will probably end up in used_builds, but that's ok.
        build_origin = None
        if (build_origin_symlink.exists() and
                build_origin_symlink.is_symlink() and
                build_origin_symlink.resolve().exists()):
            build_origin = build_origin_symlink.resolve()

        # Tests newer than the cutoff are kept; note their build as used.
        if test_time > cutoff_date:
            used_builds.add(build_origin)
            continue

        state = None
        try:
            test_obj = TestRun.load(pav_cfg, int(test))
            state = test_obj.status.current().state
        except (TestRunError, TestRunNotFoundError):
            # It's ok if this happens, we'll still remove by date.
            # It is possible the test isn't completely written (a race
            # condition).
            pass
        except PermissionError as err:
            err = str(err).split("'")
            output.fprint("Permission Error: {} cannot be removed"
                          .format(err[1]), file=self.errfile, color=31)
            continue

        # Never delete a test (or its build) that's still active.
        if state in (STATES.RUNNING, STATES.SCHEDULED):
            used_builds.add(build_origin)
            continue

        try:
            shutil.rmtree(test_path.as_posix())
            if args.verbose:
                output.fprint("Removed test {}".format(test_path),
                              file=self.outfile)
            removed_tests += 1
        except OSError as err:
            output.fprint(
                "Could not remove test {}: {}"
                .format(test_path, err),
                color=output.YELLOW, file=self.errfile)

    # Start numbering from the beginning again
    dir_db.reset_pkey(tests_dir)

    # Clean Series
    output.fprint("Removing Series...", file=self.outfile,
                  color=output.GREEN)
    for series in dir_db.select(series_dir):
        for test in series.iterdir():
            if (test.is_symlink() and test.exists() and
                    test.resolve().exists()):
                # This test is still present, so keep the series.
                break
        else:
            # This series has no remaining tests, we can delete it.
            try:
                shutil.rmtree(series.as_posix())
                removed_series += 1
            except OSError as err:
                output.fprint(
                    "Could not remove series {}: {}"
                    .format(series, err),
                    color=output.YELLOW, file=self.errfile
                )

    # Start numbering from the beginning again
    dir_db.reset_pkey(series_dir)

    # Clean Builds
    output.fprint("Removing Builds...", file=self.outfile,
                  color=output.GREEN)
    for build in build_dir.iterdir():
        if build in used_builds:
            continue

        try:
            shutil.rmtree(build.as_posix())
            # Fix: the original never incremented removed_builds, so the
            # summary always reported 0 builds removed.
            removed_builds += 1
            if args.verbose:
                output.fprint("Removed build", build, file=self.outfile)
        except OSError as err:
            output.fprint(
                "Could not remove build {}: {}"
                .format(build, err),
                color=output.YELLOW, file=self.errfile)

    output.fprint("Removed {tests} tests, {series} series, and {builds} "
                  "builds."
                  .format(tests=removed_tests, series=removed_series,
                          builds=removed_builds),
                  color=output.GREEN, file=self.outfile)
    return 0
def __init__(self, path: Path):
    # Root path this object describes.
    self.path = path

    # Lazily-computed completion flag; populated elsewhere.
    self._complete = None
    # Materialize the selected test entries up front.
    self._tests = list(dir_db.select(self.path))