def ranked_mirrors(self):
    """
    A list of :class:`CandidateMirror` objects (ordered from best to worst).

    The value of this property is computed by concurrently testing the
    mirrors in :attr:`available_mirrors` for the following details:

    - availability (:attr:`~CandidateMirror.is_available`)
    - connection speed (:attr:`~CandidateMirror.bandwidth`)
    - update status (:attr:`~CandidateMirror.is_updating`)

    The number of mirrors to test is limited to :attr:`max_mirrors` and you
    can change the number of simultaneous HTTP connections allowed by
    setting :attr:`concurrency`.
    """
    timer = Timer()
    # Sort the candidates based on the currently available information
    # (and transform the input argument into a list in the process).
    mirrors = sorted(self.available_mirrors, key=lambda c: c.sort_key, reverse=True)
    # Limit the number of candidates to a reasonable number?
    if self.max_mirrors and len(mirrors) > self.max_mirrors:
        mirrors = mirrors[:self.max_mirrors]
    # Prepare the Release.gpg URLs to fetch.
    mapping = dict((c.release_gpg_url, c) for c in mirrors)
    num_mirrors = pluralize(len(mapping), "mirror")
    logger.info("Checking %s for availability and performance ..", num_mirrors)
    # Concurrently fetch the Release.gpg files.
    with AutomaticSpinner(label="Checking mirrors"):
        for url, data, elapsed_time in fetch_concurrent(mapping.keys(), concurrency=self.concurrency):
            candidate = mapping[url]
            candidate.release_gpg_contents = data
            candidate.release_gpg_latency = elapsed_time
    # Concurrently check for Archive-Update-in-Progress markers.
    update_mapping = dict((c.archive_update_in_progress_url, c) for c in mirrors if c.is_available)
    logger.info("Checking %s for Archive-Update-in-Progress marker ..",
                pluralize(len(update_mapping), "mirror"))
    with AutomaticSpinner(label="Checking mirrors"):
        for url, data, elapsed_time in fetch_concurrent(update_mapping.keys(), concurrency=self.concurrency):
            update_mapping[url].is_updating = data is not None
    # Sanity check our results.
    mirrors = list(mapping.values())
    logger.info("Finished checking %s (took %s).", num_mirrors, timer)
    if not any(c.is_available for c in mirrors):
        raise Exception("It looks like all %s are unavailable!" % num_mirrors)
    if all(c.is_updating for c in mirrors):
        logger.warning("It looks like all %s are being updated?!", num_mirrors)
    return sorted(mirrors, key=lambda c: c.sort_key, reverse=True)
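
# Usage sketch: the docstring above describes `ranked_mirrors` as a property,
# so the ranking work happens on first access. The helper below assumes an
# owning class in the style of apt-mirror-updater's `AptMirrorUpdater` and a
# `mirror_url` attribute on `CandidateMirror`; treat those names as
# illustrative assumptions, not a confirmed public API.
def example_pick_best_mirror(updater):
    """Return the URL of the best mirror that is up and not syncing (hypothetical helper)."""
    for candidate in updater.ranked_mirrors:
        # `is_available` and `is_updating` are named in the docstring above;
        # `mirror_url` is an assumed attribute.
        if candidate.is_available and not candidate.is_updating:
            return candidate.mirror_url
    raise RuntimeError("No usable mirror found!")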
def get_job_groups(browser, root_url, args):
    if args.job_group_urls:
        job_group_urls = args.job_group_urls.split(',')
        log.info("Acting on specified job group URL(s): %s" % ', '.join(job_group_urls))
        job_groups = {i: url for i, url in enumerate(job_group_urls)}
    else:
        if args.no_progress or not humanfriendly_available:
            soup = browser.get_soup(root_url)
        else:
            with AutomaticSpinner(label='Retrieving job groups'):
                soup = browser.get_soup(root_url)
        job_groups = {
            i.text: absolute_url(root_url, i)
            for i in soup.select('h2 a[href^="/group_overview/"]')
        }
        log.debug("job groups found: %s" % job_groups.keys())
    if args.job_groups:
        job_pattern = re.compile('(%s)' % '|'.join(args.job_groups.split(',')))
        job_groups = {k: v for k, v in iteritems(job_groups) if job_pattern.search(k)}
        log.info("Job group URL for %s: %s" % (args.job_groups, job_groups))
    return SortedDict(job_groups)
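
# Usage sketch: `get_job_groups()` only reads `args.job_group_urls`,
# `args.job_groups` and `args.no_progress`, so a bare namespace is enough to
# drive it. `Browser(args, root_url)` matches how the surrounding module
# constructs it; the host URL below is made up for illustration.
def example_get_job_groups():
    """Fetch the job groups of a public openQA instance (hypothetical values)."""
    import argparse
    args = argparse.Namespace(job_group_urls=None, job_groups=None, no_progress=True)
    root_url = 'https://openqa.opensuse.org'
    browser = Browser(args, root_url)
    return get_job_groups(browser, root_url, args)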
def generate_report(args):
    verbose_to_log = {
        0: logging.CRITICAL,
        1: logging.ERROR,
        2: logging.WARN,
        3: logging.INFO,
        4: logging.DEBUG
    }
    logging_level = logging.DEBUG if args.verbose > 4 else verbose_to_log[args.verbose]
    log.setLevel(logging_level)
    log.debug("args: %s" % args)
    args.output_state_results = True if args.verbose > 1 else args.output_state_results
    root_url = urljoin(args.host, args.base_url)
    browser = Browser(args, root_url)
    job_groups = get_job_groups(browser, root_url, args)
    assert not (args.builds and len(job_groups) > 1), "builds option and multiple job groups not supported"
    assert len(job_groups) > 0, "No job groups were found, maybe misspecified '--job-groups'?"

    # for each job group on openqa.opensuse.org
    def one_report(job_group_url):
        try:
            # Use the parameter instead of closing over the loop variable 'v',
            # which would silently log the wrong URL if this function were
            # ever called outside the loop below.
            log.info("Processing '%s'" % job_group_url)
            return generate_product_report(browser, job_group_url, root_url, args)
        except NotEnoughBuildsError:
            return "Not enough finished builds found"

    label = 'Gathering data and processing report'
    progress = 0
    report = ''

    def next_label(progress):
        return '%s %s %%' % (label, progress * 100 / len(job_groups.keys()))

    for k, v in iteritems(job_groups):
        if args.no_progress or not humanfriendly_available:
            report += '# %s\n\n%s' % (k, one_report(v)) + '\n---\n'
        else:
            with AutomaticSpinner(label=next_label(progress)):
                report += '# %s\n\n%s' % (k, one_report(v)) + '\n---\n'
        progress += 1
    if not args.no_progress:
        print("\n%s" % next_label(progress))  # It's nice to see 100%, too :-)
    return report
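
# Usage sketch: `generate_report()` expects a fully populated argparse
# namespace. The attributes below mirror what the function itself reads
# (`verbose`, `host`, `base_url`, `builds`, ...) but the values are
# assumptions for illustration, not the tool's documented defaults.
def example_generate_report():
    """Build a review report for a public openQA instance (hypothetical values)."""
    import argparse
    args = argparse.Namespace(
        verbose=1, host='https://openqa.opensuse.org', base_url='/',
        job_group_urls=None, job_groups=None, builds=None,
        no_progress=True, output_state_results=False)
    return generate_report(args)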
def run():
    global gcinfo
    print(f"Initializing AthenaOS v{version}...")
    with AutomaticSpinner(label="Loading Athena..."):
        gcinfo = initCreateCore()
        initCacheProcesses(gcinfo)
        initCacheCommands(gcinfo)
    try:
        run_cli()
    except KeyboardInterrupt:
        print("Closing.")
        for item in gcinfo.threads:
            try:
                gcinfo.threads[item]['object'].process.terminate()
            except Exception:
                # The worker process may already have exited; ignore it.
                pass
    except Exception as e:
        print(e)
        run_cli()
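
# Shutdown sketch: the KeyboardInterrupt handler above assumes that
# `gcinfo.threads` maps a thread name to a dict whose 'object' entry exposes a
# `process` supporting `terminate()`. The standalone helper below satisfies
# that same assumed contract; the names are taken from the code above, not
# from any documented interface.
def example_stop_threads(threads):
    """Terminate every tracked worker process, ignoring ones already gone."""
    for name in threads:
        try:
            threads[name]['object'].process.terminate()
        except Exception:
            pass  # the process may already have exited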
def __init__(self, browser, args, root_url, job_groups):
    """Create openQA review report."""
    self.browser = browser
    self.args = args
    self.root_url = root_url
    self.job_groups = job_groups
    self._label = 'Gathering data and processing report'
    self._progress = 0
    self.report = SortedDict()
    for k, v in iteritems(job_groups):
        log.info("Processing '%s'" % v)
        if args.no_progress or not humanfriendly_available:
            self.report[k] = self._one_report(v)
        else:
            with AutomaticSpinner(label=self._next_label(self._progress)):
                self.report[k] = self._one_report(v)
        self._progress += 1
    if not args.no_progress:
        sys.stderr.write("\r%s\n" % self._next_label(self._progress))  # It's nice to see 100%, too :-)
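
# Usage sketch: this constructor does all the work, so producing a report is
# just instantiation followed by reading `self.report`. The class name
# `Report` follows openqa_review's layout but should be treated as an
# assumption here.
def example_build_report(browser, args, root_url, job_groups):
    """Instantiate the report class (assumed name) and print each group's report."""
    report = Report(browser, args, root_url, job_groups)  # assumed class name
    for group_name, group_report in report.report.items():
        print('# %s\n\n%s\n---' % (group_name, group_report))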
def collect_packages(archives, directory, prompt=True, cache=None, concurrency=None):
    """
    Interactively copy packages and their dependencies.

    :param archives: An iterable of strings with the filenames of one or
                     more ``*.deb`` files.
    :param directory: The pathname of a directory where the package archives
                      and dependencies should be copied to (a string).
    :param prompt: :data:`True` (the default) to ask confirmation from the
                   operator (using a confirmation prompt rendered on the
                   terminal), :data:`False` to skip the prompt.
    :param cache: The :class:`.PackageCache` to use (defaults to :data:`None`).
    :param concurrency: Override the number of concurrent processes (defaults
                        to the number of `archives` given or to the value of
                        :func:`multiprocessing.cpu_count()`, whichever is
                        smaller).
    :raises: :exc:`~exceptions.ValueError` when no archives are given.

    When more than one archive is given a :mod:`multiprocessing` pool is used
    to collect related archives concurrently, in order to speed up the
    process of collecting large dependency sets.
    """
    archives = list(archives)
    related_archives = set(map(parse_filename, archives))
    if not archives:
        raise ValueError("At least one package archive is required!")
    elif len(archives) == 1:
        # Find the related packages of a single archive.
        related_archives.update(collect_related_packages(archives[0], cache=cache))
    else:
        # Find the related packages of multiple archives (concurrently).
        with AutomaticSpinner(label="Collecting related packages"):
            concurrency = min(len(archives), concurrency or multiprocessing.cpu_count())
            pool = multiprocessing.Pool(concurrency)
            try:
                arguments = [(archive, cache) for archive in archives]
                for result in pool.map(collect_packages_worker, arguments, chunksize=1):
                    related_archives.update(result)
            finally:
                pool.terminate()
    # Ignore package archives that are already in the target directory.
    relevant_archives = set()
    for archive in related_archives:
        basename = os.path.basename(archive.filename)
        if not os.path.isfile(os.path.join(directory, basename)):
            relevant_archives.add(archive)
    # Interactively move the package archives.
    if relevant_archives:
        relevant_archives = sorted(relevant_archives)
        pluralized = pluralize(len(relevant_archives), "package archive", "package archives")
        say("Found %s:", pluralized)
        for file_to_collect in relevant_archives:
            say(" - %s", format_path(file_to_collect.filename))
        prompt_text = "Copy %s to %s?" % (pluralized, format_path(directory))
        if prompt and not prompt_for_confirmation(prompt_text, default=True, padding=False):
            logger.warning("Not copying archive(s) to %s! (aborted by user)", format_path(directory))
        else:
            # Link or copy the file(s).
            for file_to_collect in relevant_archives:
                src = file_to_collect.filename
                dst = os.path.join(directory, os.path.basename(src))
                smart_copy(src, dst)
            logger.info("Done! Copied %s to %s.", pluralized, format_path(directory))
    else:
        logger.info("Nothing to do! (%s previously copied)",
                    pluralize(len(related_archives), "package archive"))
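
# Usage sketch: a non-interactive call to `collect_packages()`, following the
# docstring above: `prompt=False` skips the confirmation prompt and a single
# archive avoids the multiprocessing pool. The paths are made up for
# illustration.
def example_collect_packages():
    """Collect one archive and its dependencies into a target directory (hypothetical paths)."""
    collect_packages(
        archives=['/tmp/incoming/foo_1.0_amd64.deb'],  # hypothetical archive
        directory='/tmp/repository',                   # hypothetical target
        prompt=False)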
def render_summary(self):
    """Render a summary of installed and removable kernel packages on the terminal."""
    logger.verbose("Sanity checking meta packages on %s ..", self.context)
    with AutomaticSpinner(label="Gathering information about %s" % self.context):
        # Report the installed Linux kernel image meta package(s).
        if self.installed_image_meta_packages:
            logger.info("Found %s installed:",
                        pluralize(len(self.installed_image_meta_packages),
                                  "Linux kernel image meta package"))
            for package in self.installed_image_meta_packages:
                logger.info(" - %s (%s)", package.name, package.version)
            if len(self.installed_image_meta_packages) > 1:
                names = concatenate(pkg.name for pkg in self.installed_image_meta_packages)
                logger.warning(compact("""
                    You have more than one Linux kernel image meta package
                    installed ({names}) which means automatic package
                    removal can be unreliable!
                """, names=names))
                logger.verbose(compact("""
                    I would suggest to stick to one Linux kernel image meta
                    package, preferably the one that matches the newest
                    kernel :-)
                """))
        else:
            logger.warning(compact("""
                It looks like there's no Linux kernel image meta package
                installed! I hope you've thought about how to handle
                security updates?
            """))
        # Report the installed Linux kernel header/image package(s).
        logger.verbose("Checking for removable packages on %s ..", self.context)
        package_types = (
            (self.installed_kernel_packages, "image", True),
            (self.installed_header_packages, "header", False),
        )
        for collection, label, expected in package_types:
            if collection:
                logger.info("Found %s:",
                            pluralize(len(collection),
                                      "installed Linux kernel %s package" % label))
                for group in self.installed_package_groups:
                    matching_packages = sorted(
                        package.name for package in group
                        if package in collection)
                    active_group = any(
                        package.name == self.active_kernel_package
                        for package in group)
                    removable_group = (group in self.removable_package_groups)
                    if matching_packages:
                        logger.info(
                            " - %s (%s)",
                            concatenate(matching_packages),
                            ansi_wrap("removable", color='green')
                            if removable_group else ansi_wrap(
                                "the active kernel" if active_group else
                                ("one of %i newest kernels" % self.preserve_count),
                                color='blue'),
                        )
            elif expected:
                logger.warning("No installed %s packages found, this can't be right?!", label)
        # Report the removable packages.
        if self.removable_packages:
            logger.info("Found %s that can be removed.",
                        pluralize(len(self.removable_packages), "package"))
            # Report the shell command to remove the packages.
            logger.verbose("Command to remove packages: %s", ' '.join(self.cleanup_command))
        else:
            logger.info("No packages need to be removed! :-)")
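
# Usage sketch: `render_summary()` only reports, so it is safe to call before
# deciding on any cleanup. The `KernelPackageManager` name follows the layout
# of debuntu-tools' kernel manager but is an assumption here, as is the
# zero-argument constructor (the real class likely takes configuration such as
# an execution context).
def example_render_summary():
    """Print the kernel package summary for the local system (assumed class name)."""
    manager = KernelPackageManager()  # assumed owning class
    manager.render_summary()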