def sack(self):
    """Apply version locks once the sack is ready.

    Locks pin a package to the versions matched by the locklist; every
    other version of a locked name is excluded. Patterns prefixed with
    '!' are excluded outright.
    """
    if not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    pinned = set()
    for pattern in _read_locklist():
        # A leading '!' marks the pattern as an exclusion, not a lock.
        is_exclusion = bool(pattern) and pattern[0] == '!'
        if is_exclusion:
            pattern = pattern[1:]
        query = dnf.subject.Subject(pattern).get_best_query(self.base.sack)
        if is_exclusion:
            self.base.sack.add_excludes(query)
        else:
            pinned.update(query.run())
    if not pinned:
        return
    names = [pkg.name for pkg in pinned]
    every_version = set(self.base.sack.query().filter(name=names))
    # Hide every version of the locked names except the pinned ones.
    self.base.sack.add_excludes(every_version.difference(pinned))
def _get_deps(sack):
    """Build a dependency map of available packages.

    :param sack: dnf sack whose available packages are examined
    :return: dict mapping each package name to the names of the
             (first-found) providers of its requirements

    Improvements over the previous version: ``skip`` is a set (it is
    membership-tested for every requirement of every package, so a list
    made this O(n) per test), and the stored value is a concrete list
    instead of a Python 3 dict-keys view.
    """
    requires = {}
    prov = {}   # requirement string -> chosen provider name (cache)
    skip = set()  # requirement strings known to have no provider
    available = sack.query().available()
    for pkg in available:
        xx = {}  # dict used as an insertion-ordered set of provider names
        for req in pkg.requires:
            reqname = str(req)
            if reqname in skip:
                continue
            # XXX: https://bugzilla.redhat.com/show_bug.cgi?id=1186721
            if reqname.startswith("solvable:"):
                continue
            if reqname in prov:
                provider = prov[reqname]
            else:
                provider = available.filter(provides=reqname)
                if not provider:
                    logger.debug(_("Nothing provides: '%s'"), reqname)
                    skip.add(reqname)
                    continue
                else:
                    # Only the first provider is considered.
                    provider = provider[0].name
                prov[reqname] = provider
            # NOTE(review): a self-provide is recorded first and then the
            # membership test below skips the duplicate insert; the net
            # effect is that the provider always ends up in xx once.
            if provider == pkg.name:
                xx[provider] = None
            if provider in xx or provider in skip:
                continue
            else:
                xx[provider] = None
        requires[pkg.name] = list(xx.keys())
    return requires
def _get_source_packages(self, pkgs): """Get list of source rpm names for a list of packages.""" source_pkgs = set() for pkg in pkgs: source_pkgs.add(pkg.sourcerpm) logger.debug(' --> Package : %s Source : %s' % (str(pkg), pkg.sourcerpm)) return list(source_pkgs)
def _di_install(self, package, require):
    """Recursively queue debuginfo packages for *package* and its deps.

    :param package: package whose debuginfo should be installed
    :param require: requirement string that led us to this package (may
                    be falsy for the initial call)

    Side effects: appends to ``self.done`` / ``self.rejected`` and marks
    packages for installation on ``self.base``. Recurses through shared
    library (".so") requirements.
    """
    pkgname = self._pkgname(package)
    # Already handled (by name or by the requirement string), or known bad.
    if pkgname in self.done \
            or require in self.done \
            or package in self.rejected:
        return
    if self._is_available(package, True):
        # Exact-version debuginfo is available: install the fully
        # versioned debuginfo nevra.
        self.done.append(pkgname)
        if require:
            self.done.append(require)
        if "-debuginfo" in pkgname:
            # The package itself is already a -debuginfo package.
            di = "{0}-{1}:{2}-{3}.{4}".format(
                pkgname, package.epoch, package.version,
                package.release, package.arch)
        else:
            di = "{0}-debuginfo-{1}:{2}-{3}.{4}".format(
                pkgname, package.epoch, package.version,
                package.release, package.arch)
        self.base.install(di)
    else:
        if self._is_available(package, False):
            # Only a version-less match exists; install by name and arch.
            di = "{0}-debuginfo.{1}".format(pkgname, package.arch)
            self.base.install(di)
            self.done.append(pkgname)
            if require:
                self.done.append(require)
        else:
            # No debuginfo available at all; silently give up on this one.
            pass
    # Walk the package's requirements looking for shared-library deps and
    # recurse into their providers.
    for req in package.requires:
        if str(req).startswith("rpmlib("):
            # Internal rpm feature requirements; never have debuginfo.
            continue
        elif str(req) in self.done:
            continue
        elif str(req).find(".so") != -1:
            provides = self.packages_available.filter(provides=req)
            for p in provides:
                if str(p.name) in self.done or p in self.rejected:
                    continue
                pkgs = self.packages_installed.filter(name=p.name)
                if len(pkgs) != 0:
                    # NOTE(review): _is_available appears to return a
                    # query/list here (assigned to pkgs below), not just a
                    # boolean — confirm against its definition.
                    pkgs_avail = self._is_available(pkgs[0], True)
                    if not pkgs_avail:
                        for x in pkgs:
                            logger.debug(
                                _("Can't find debuginfo package for: "
                                  "{0}-{1}:{2}-{3}.{4}").format(
                                    x.name, x.epoch, x.version,
                                    x.release, x.arch))
                            self.rejected.append(x)
                        pkgs = []
                    else:
                        pkgs = pkgs_avail
                for pkg in pkgs:
                    self._di_install(pkg, str(req))
def _get_source_packages(self, pkgs): """Get list of source rpm names for a list of packages.""" source_pkgs = set() for pkg in pkgs: if pkg.sourcerpm: source_pkgs.add(pkg.sourcerpm) logger.debug(' --> Package : %s Source : %s', str(pkg), pkg.sourcerpm) else: logger.info(_("No source rpm definded for %s"), str(pkg)) return list(source_pkgs)
def __init__(self, base, cli):
    """Set up plugin state and read the Spacewalk plugin configuration."""
    super(Spacewalk, self).__init__(base, cli)
    self.base = base
    self.cli = cli
    base_conf = self.base.conf
    self.stored_channels_path = os.path.join(
        base_conf.persistdir, STORED_CHANNELS_NAME)
    self.connected_to_spacewalk = False
    self.timeout = base_conf.timeout
    self.up2date_cfg = {}
    # Plugin-private configuration object, populated from PLUGIN_CONF.
    self.conf = dnf.conf.Conf()
    self.read_config(self.conf, PLUGIN_CONF)
    logger.debug('initialized Spacewalk plugin')
def _get_source_packages(pkgs): """Get list of source rpm names for a list of packages.""" source_pkgs = set() for pkg in pkgs: if pkg.sourcerpm: source_pkgs.add(pkg.sourcerpm) logger.debug(' --> Package : %s Source : %s', str(pkg), pkg.sourcerpm) elif pkg.arch == 'src': source_pkgs.add("%s-%s.src.rpm" % (pkg.name, pkg.evr)) else: logger.info(_("No source rpm defined for %s"), str(pkg)) return list(source_pkgs)
def _get_packages_with_deps(self, pkg_specs, source=False):
    """Get packages matching pkg_specs and the deps."""
    matched = self._get_packages(pkg_specs)
    goal = hawkey.Goal(self.base.sack)
    for pkg in matched:
        goal.install(pkg)
    # Depsolve; on failure report it and return nothing.
    if not goal.run():
        logger.debug(_('Error in resolve'))
        return []
    return goal.list_installs()
def purge_generated_dir(self):
    """Remove all generated SWID tag files, then the directory itself.

    Individual removal failures are logged as warnings and do not abort
    the purge.
    """
    if not path.isdir(self.dir_generated):
        return
    count = 0
    for f in listdir(self.dir_generated):
        try:
            unlink(path.join(self.dir_generated, f))
            count += 1
        except OSError as e:
            logger.warning("Failed to remove [%s]: %s", f, e)
    try:
        rmdir(self.dir_generated)
    except OSError as e:
        # Bug fix: this handler previously did not bind the exception
        # ('except OSError:') yet logged 'e', which was unbound here and
        # raised NameError/UnboundLocalError instead of warning.
        logger.warning("Failed to remove [%s]: %s", self.dir_generated, e)
    if count > 0:
        logger.debug("Removed %d generated files from %s",
                     count, self.dir_generated)
def _enable_debug_repos(self):
    """Enable the debuginfo counterpart of every enabled repository.

    For a repo id ending in "-rpms" the counterpart is "<id>-debug-rpms";
    otherwise it is "<id>-debuginfo".
    """
    # Map of currently enabled repo ids to their repo objects.
    enabled = {repo.id: repo for repo in self.base.repos.iter_enabled()}
    for repoid in enabled:
        if repoid.endswith("-rpms"):
            debug_id = repoid[:-5] + "-debug-rpms"
        else:
            debug_id = "{}-debuginfo".format(repoid)
        if debug_id in enabled:
            # Debug repo is already enabled.
            continue
        # (Removed a dead local: the original re-read the source repo
        # object here without ever using it.)
        for r in self.base.repos:
            if r == debug_id:
                logger.debug(_("enabling {}").format(debug_id))
                self.base.repos[r].enable()
def migrate_yumdb(self):
    """Migrate YUMDB data.

    Copies per-package metadata records from the legacy YUMDB into the
    DNFDB, skipping packages that already have DNFDB entries. A summary
    line with found/migrated/skipped counts is always logged, even when
    migration aborts with an error.
    """
    # Attribute name -> whether a missing value is worth a WARNING
    # (True) or just a DEBUG message (False).
    attribute2mandatory = {
        "changed_by": False, "checksum_data": True, "checksum_type": True,
        "command_line": False, "from_repo": True,
        "from_repo_revision": False, "from_repo_timestamp": False,
        "installed_by": False, "reason": True, "releasever": True}
    migrated = skipped = 0
    logger.info(_("Migrating YUMDB data..."))
    try:
        with contextlib.closing(_YumBase()) as yumbase:
            for pkgtup, pkgid in yumbase.iter_yumdb(logger.warning):
                # Human-readable name-version-release.arch for messages.
                nevra = "{0[0]}-{0[3]}-{0[4]}.{0[1]}".format(pkgtup)
                dnfdata = self.base.yumdb.get_package(
                    pkgtup=pkgtup, pkgid=pkgid)
                # A non-empty DNFDB record means this package was already
                # migrated (or created natively); leave it alone.
                if next(iter(dnfdata), None) is not None:
                    logger.warning("%s found in DNFDB; skipping", nevra)
                    skipped += 1
                    continue
                yumdata = yumbase.rpmdb.yumdb.get_package(
                    pkgtup=pkgtup, pkgid=pkgid)
                for attribute, mandat in attribute2mandatory.items():
                    try:
                        value = getattr(yumdata, attribute)
                    except AttributeError:
                        lvl = logging.WARNING if mandat else logging.DEBUG
                        msg = _("%s of %s not found")
                        logger.log(lvl, msg, attribute, nevra)
                        continue
                    if isinstance(value, bytes):
                        # Decode with replacement so migration never dies
                        # on bad bytes; warn when data was actually lost.
                        value = value.decode("utf-8", "replace")
                        if '\ufffd' in value:
                            msg = _(
                                "replacing unknown characters in %s of %s")
                            logger.warning(msg, attribute, nevra)
                    try:
                        setattr(dnfdata, attribute, value)
                    except (OSError, IOError):
                        msg = _("DNFDB access denied")
                        raise dnf.exceptions.Error(msg)
                    logger.debug(_("%s of %s migrated"), attribute, nevra)
                migrated += 1
    finally:
        # Always report totals, even if the migration raised mid-way.
        logger.info(
            _("%d YUMDB records found, %d migrated, %d skipped/preserved"),
            migrated + skipped, migrated, skipped)
def __init__(self, base, cli):
    """Initialize the plugin and apply the [main] section of its config."""
    super(Spacewalk, self).__init__(base, cli)
    self.base = base
    self.cli = cli
    self.stored_channels_path = os.path.join(
        self.base.conf.persistdir, STORED_CHANNELS_NAME)
    self.connected_to_spacewalk = False
    self.up2date_cfg = {}
    # The plugin config starts as a copy of dnf's main configuration and
    # is then overridden from PLUGIN_CONF's [main] section.
    self.conf = copy(self.base.conf)
    self.parser = self.read_config(self.conf, PLUGIN_CONF)
    if "main" in self.parser.sections():
        for key, value in self.parser.items("main"):
            setattr(self.conf, key, value)
    if not self.conf.enabled:
        return
    logger.debug('initialized Spacewalk plugin')
def read_installed_langpacks(self):
    """Read the installed langpacks file.

    :return: list of stripped lines from ``self.conffile``, or an empty
             list when no file is configured or it cannot be read
    """
    if not self.conffile:
        return []
    try:
        # 'with' guarantees the handle is closed even if readlines()
        # raises; the original leaked the file descriptor in that case.
        with open(self.conffile, "r") as conf_fp:
            llist = conf_fp.readlines()
    except (IOError, OSError):
        logger.debug("Error reading file : %s as it does not exist",
                     self.conffile)
        return []
    return [item.strip() for item in llist]
def __init__(self, base, cli):
    """Initialize the Spacewalk plugin, load its config and activate
    the subscribed channels.

    Requires root (channel activation writes system state); when not
    root the plugin disables itself with a warning.
    """
    super(Spacewalk, self).__init__(base, cli)
    self.base = base
    self.cli = cli
    self.stored_channels_path = os.path.join(self.base.conf.persistdir,
                                             STORED_CHANNELS_NAME)
    self.connected_to_spacewalk = False
    self.up2date_cfg = {}
    # Plugin config: a copy of dnf's main config overridden by the
    # plugin config file's [main] section at plugin-config priority.
    self.conf = copy(self.base.conf)
    self.parser = self.read_config(self.conf)
    if "main" in self.parser.sections():
        options = self.parser.items("main")
        for (key, value) in options:
            self.conf._set_value(key, value, PRIO_PLUGINCONFIG)
    if not dnf.util.am_i_root():
        logger.warning(MUST_BE_ROOT)
        self.conf.enabled = False
    if not self.conf.enabled:
        return
    logger.debug('initialized Spacewalk plugin')
    self.activate_channels()
def config(self):
    """Read the plugin configuration, seed default sections/options, and
    request SWID tag metadata download for every enabled repository."""
    super(swidtags, self).config()
    self.conf = self.read_config(self.base.conf)
    DEFAULTS = {"main": {}}
    for s in DEFAULTS:
        if not self.conf.has_section(s):
            # The parser may be iniparse (addSection/setValue) or
            # stdlib configparser (add_section/set) — presumably
            # depending on the dnf version; try the former first.
            try:
                self.conf.addSection(s)
            except AttributeError:
                self.conf.add_section(s)
        for o in DEFAULTS[s]:
            if not self.conf.has_option(s, o):
                try:
                    self.conf.setValue(s, o, DEFAULTS[s][o])
                except AttributeError:
                    self.conf.set(s, o, DEFAULTS[s][o])
    for repo in self.base.repos.iter_enabled():
        # Only newer dnf repo objects support extra metadata downloads.
        if hasattr(repo, "add_metadata_type_to_download"):
            logger.debug("Will ask for SWID tags download for %s",
                         str(repo.baseurl))
            repo.add_metadata_type_to_download(self.METADATA_TYPE)
def run_rpm2swidtag_for(self, pkgs):
    """Run the configured rpm2swidtag command for the given packages.

    :param pkgs: list of package nevra strings to generate SWID tags for
    :return: the command's exit code; -1 for empty input; -2 when no
             rpm2swidtag_command is configured
    """
    # Simplified guard: 'not pkgs' already covers 'len(pkgs) < 1'.
    if not pkgs:
        return -1
    hostname = platform.uname()[1]
    try:
        rpm2swidtag_command = self.conf.get("main", "rpm2swidtag_command")
    except KeyError:
        return -2
    except Exception as e:
        # The config object may be iniparse or configparser; detect the
        # latter's NoOptionError by name to avoid a hard import.
        if e.__class__.__name__ == "NoOptionError":
            return -2
        # Bare 'raise' re-raises in place instead of 'raise e', keeping
        # the traceback rooted at the original failure.
        raise
    logger.debug("Running %s for %s ...", rpm2swidtag_command, pkgs)
    env = {"_RPM2SWIDTAG_RPMDBPATH": path.join(self.base.conf.installroot,
                                               "usr/lib/sysimage/rpm")}
    if not path.isdir(env["_RPM2SWIDTAG_RPMDBPATH"]):
        # Fall back to the traditional rpmdb location.
        env["_RPM2SWIDTAG_RPMDBPATH"] = path.join(
            self.base.conf.installroot, "var/lib/rpm")
    if "PYTHONPATH" in environ:
        env["PYTHONPATH"] = environ["PYTHONPATH"]
    ret = run(rpm2swidtag_command.split()
              + ["--tag-creator", hostname,
                 "--output-dir", path.join(self.dir_generated, ".")]
              + pkgs,
              env=env, check=False).returncode
    self.create_generated_dir()
    self.create_swidtags_d_symlink()
    return ret
def sack(self):
    """Apply versionlock rules to the sack.

    Builds one query of explicitly excluded NEVRAs ('!' entries) and one
    of locked NEVRAs, then excludes every other version of the locked
    names. Packages hidden purely by a lock (not by an explicit '!')
    are printed for the user.
    """
    if not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        # Index into count: 0 = lock rule, 1 = '!' exclude rule.
        excl = 0
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(
            pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        # All versions of the locked names, minus the locked versions,
        # are the candidates to hide.
        all_versions = self.base.sack.query().filter(
            name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        # Tell the user which packages a lock (not a '!') is hiding.
        excluded = list(
            set([
                str(x) for x in other_versions.difference(excludes_query)
            ]))
        if excluded:
            print("Available packages ignored because of versionlock: ")
            for pkg in excluded:
                print(" ", pkg)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def sack(self):
    """Apply versionlock rules to the sack.

    In addition to hiding non-locked versions of locked names, this
    variant also excludes packages that obsolete the locked versions
    (so an obsoleter cannot sneak past the lock), while never excluding
    anything from the installed (system) repo.
    """
    if not self.locking_enabled():
        logger.debug(NO_VERSIONLOCK)
        return
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        # Index into count: 0 = lock rule, 1 = '!' exclude rule.
        excl = 0
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(
            pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        all_versions = self.base.sack.query().filter(
            name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
        # exclude also anything that obsoletes the locked versions of packages
        obsoletes_query = self.base.sack.query().filterm(
            obsoletes=locked_query)
        # leave out obsoleters that are also part of locked versions (otherwise the obsoleter package
        # would not be installable at all)
        excludes_query = excludes_query.union(
            obsoletes_query.difference(locked_query))
    # Never exclude already-installed packages.
    excludes_query.filterm(reponame__neq=hawkey.SYSTEM_REPO_NAME)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def sack(self):
    """Apply versionlock rules to the sack.

    Each locklist entry must parse as a full NEVRA (FORM_NEVRA); '!'
    entries are excluded directly, other entries lock their name to the
    given version and every other version of that name is excluded.
    """
    if not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    excluded_count = 0
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    locked_count = 0
    for pat in _read_locklist():
        excl = False
        if pat and pat[0] == '!':
            # Leading '!' marks an exclusion rather than a lock.
            pat = pat[1:]
            excl = True
        subj = dnf.subject.Subject(pat)
        # Only the strict NEVRA form is accepted here.
        possible_nevras = list(
            subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA]))
        if not possible_nevras:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        nevra = possible_nevras[0]
        pat_query = nevra.to_query(self.base.sack)
        if excl:
            excluded_count += 1
            excludes_query = excludes_query.union(pat_query)
        else:
            locked_count += 1
            locked_names.add(nevra.name)
            locked_query = locked_query.union(pat_query)
    if excluded_count:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, excluded_count))
    if locked_count:
        logger.debug(APPLY_LOCK.format(locklist_fn, locked_count))
    if locked_names:
        # Hide every version of the locked names except the locked ones.
        all_versions = self.base.sack.query().filter(
            name=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def sack(self):
    """Enforce version locks: hide non-locked versions of locked names
    and drop any '!'-excluded NEVRAs from the sack."""
    if not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    exclude_q = self.base.sack.query().filter(empty=True)
    lock_q = self.base.sack.query().filter(empty=True)
    lock_names = set()
    n_excluded = 0
    n_locked = 0
    for entry in _read_locklist():
        # A leading '!' turns the entry into an exclusion.
        exclude = bool(entry) and entry[0] == '!'
        if exclude:
            entry = entry[1:]
        subject = dnf.subject.Subject(entry)
        candidates = list(
            subject.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA]))
        if not candidates:
            logger.error("%s %s", NEVRA_ERROR, entry)
            continue
        chosen = candidates[0]
        nevra_q = chosen.to_query(self.base.sack)
        if exclude:
            n_excluded += 1
            exclude_q = exclude_q.union(nevra_q)
        else:
            n_locked += 1
            lock_names.add(chosen.name)
            lock_q = lock_q.union(nevra_q)
    if n_excluded:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, n_excluded))
    if n_locked:
        logger.debug(APPLY_LOCK.format(locklist_fn, n_locked))
    if lock_names:
        every_version = self.base.sack.query().filter(name=list(lock_names))
        exclude_q = exclude_q.union(every_version.difference(lock_q))
    if exclude_q:
        self.base.sack.add_excludes(exclude_q)
def sack(self):
    """Apply versionlock rules to the sack.

    This variant also works when loaded through the API (``self.cli``
    is None); the resolving-demand check only applies under the CLI.
    """
    if self.cli is None:
        pass  # loaded via the api, not called by cli
    elif not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        # Index into count: 0 = lock rule, 1 = '!' exclude rule.
        excl = 0
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        # Hide every version of the locked names except the locked ones.
        all_versions = self.base.sack.query().filter(
            name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def _check(self):
    """Check installed containers and note if updates are available.

    :return: list of (container, remote_digest) tuples for containers
             whose local image digest differs from the remote one
    """
    container_list = self.be_utils.get_containers()
    needs_update = []
    # If there is at least one container ....
    if container_list:
        logger.info("Checking %s local container(s) for updates",
                    len(container_list))
        # For each container we have ....
        for container in container_list:
            # Lazy %-args: only format when DEBUG logging is enabled.
            logger.debug("Checking: %s %s %s %s",
                         container.name,
                         container.original_structure.get('Type'),
                         container.created, container.image_name)
            inspection = util.skopeo_inspect(
                'docker://' + container.image_name)
            digest = inspection.get('Digest', ':').split(':')[1]
            # Match the local image digest with the remote digest
            if digest != container.image:
                needs_update.append((container, digest))
                logger.debug('%s: local=%s remote=%s', container.name,
                             container.image, digest)
        # Let the operator know of each container that could be updated
        if needs_update:
            logger.info('The following containers need updating:')
            for container, digest in needs_update:
                logger.info('\t%s', container.name)
            logger.info('')
            logger.info('To update your containers use dnf containers '
                        'update or, to update specific containers, the '
                        'atomic command')
            logger.info('Example: sudo atomic containers update %s',
                        needs_update[0][0].name)
        else:
            logger.info("No updates found")
    else:
        logger.debug("No containers found")
    return needs_update
def _debug(self, msg):
    """Emit *msg* on the debug log, prefixed with the plugin class name."""
    plugin_name = self.__class__.__name__
    logger.debug('{0} plugin: {1}'.format(plugin_name, msg))
def _out(self, msg):
    """Log *msg* at debug level with a "Ghost plugin" prefix."""
    logger.debug('Ghost plugin: %s', msg)
def run(self):
    """Execute the ``dnf swidtags`` subcommand (sync/regen/purge).

    Purges previously generated tags, then for sync/regen collects all
    installed rpms, pulls SWID tags from repo metadata where available,
    and runs rpm2swidtag locally for the rest.
    """
    if self.opts.swidtagscmd[0] in ("purge", "sync", "regen"):
        # All subcommands start from a clean generated-tags state.
        self.plugin.purge_generated_dir()
        self.plugin.purge_generated_symlink()
    else:
        # Unknown subcommand: print usage.
        print("dnf swidtags [sync | purge]")
    if self.opts.swidtagscmd[0] in ("sync", "regen"):
        ts = rpm.transaction.initReadOnlyTransaction(
            root=self.base.conf.installroot)
        pkgs = []
        for p in ts.dbMatch():
            # Filter out imported GPG keys
            if p["arch"]:
                pkgs.append(p)
        dirs = {}
        for r in self.base.repos.iter_enabled():
            if not hasattr(r, "get_metadata_path"):
                continue
            file = r.get_metadata_path(self.plugin.METADATA_TYPE)
            if not file or file == "":
                continue
            s = repodata.Swidtags(None, file)
            tags = s.tags_for_rpm_packages(pkgs)
            # Packages not covered by this repo's tags roll over to the
            # next repo (and ultimately to local generation).
            remaining_pkgs = []
            for p in pkgs:
                if p not in tags:
                    remaining_pkgs.append(p)
                    continue
                found = False
                for t in tags[p]:
                    logger.debug(
                        "Retrieved SWID tag from repodata for %s: %s",
                        get_nevra(p), t.get_tagid())
                    x = t.save_to_directory(self.plugin.dir_downloaded)
                    dirs[x[0]] = True
                    found = True
                if not found:
                    remaining_pkgs.append(p)
            pkgs = remaining_pkgs
        for d in dirs:
            self.plugin.create_swidtags_d_symlink(path.basename(d))
        if len(pkgs) > 0:
            # Generate tags locally for rpms with no repo-provided tag.
            run_ret = self.plugin.run_rpm2swidtag_for(
                [get_nevra(p) for p in pkgs])
            if run_ret == 0:
                # Verify every requested tag was actually produced, by
                # matching the rpm header checksums in the file names.
                pkgs_missing = {}
                for p in pkgs:
                    pkgs_missing[get_checksum(p)] = p
                for f in iglob(
                        path.join(self.plugin.dir_generated,
                                  "*-rpm-*.swidtag")):
                    m = re.search(
                        r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\.swidtag$', f)
                    if m and m.group(1) in pkgs_missing:
                        del pkgs_missing[m.group(1)]
                for p in pkgs_missing.values():
                    logger.warning(
                        "The SWID tag for rpm %s should have been generated but could not be found",
                        get_nevra(p))
            if run_ret == -2:
                logger.warning(
                    "The rpm2swidtag_command not configured for the %s plugin.\nSWID tags not generated locally for %d packages.",
                    NAME, len(pkgs))
def activate_channels(self, networking=True):
    """Register a dnf repo for every Spacewalk channel the system is
    subscribed to.

    :param networking: when False, skip all server communication and use
                       the channel list cached in the persistdir
    """
    enabled_channels = {}
    sslcacert = None
    force_http = 0
    proxy_url = None
    login_info = None
    cached_channels = self._read_channels_file()
    if not networking:
        # no network communication, use list of channels from persistdir
        enabled_channels = cached_channels
    else:
        # setup proxy according to up2date
        self.up2date_cfg = up2date_client.config.initUp2dateConfig()
        sslcacert = get_ssl_ca_cert(self.up2date_cfg)
        # NOTE(review): the trailing comma makes force_http a 1-tuple,
        # not the config value itself — looks unintended (a tuple is
        # always truthy); confirm what SpacewalkRepo expects.
        force_http = self.up2date_cfg['useNoSSLForPackages'],
        try:
            login_info = up2date_client.up2dateAuth.getLoginInfo(
                timeout=self.conf.timeout)
        except up2dateErrors.RhnServerException as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        if not login_info:
            # Not registered: clear the channel cache and bail out.
            logger.error("%s\n%s", NOT_REGISTERED_ERROR, RHN_DISABLED)
            self._write_channels_file({})
            return
        try:
            svrChannels = up2date_client.rhnChannel.getChannelDetails(
                timeout=self.conf.timeout)
        except up2dateErrors.CommunicationError as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        except up2dateErrors.NoChannelsError:
            logger.error("%s\n%s", NOT_SUBSCRIBED_ERROR, CHANNELS_DISABLED)
            self._write_channels_file({})
            return
        except up2dateErrors.NoSystemIdError:
            logger.error("%s %s\n%s\n%s", NOT_SUBSCRIBED_ERROR,
                         NO_SYSTEM_ID_ERROR, USE_RHNREGISTER, RHN_DISABLED)
            return
        self.connected_to_spacewalk = True
        logger.info(UPDATES_FROM_SPACEWALK)
        for channel in svrChannels:
            if channel['version']:
                enabled_channels[channel['label']] = dict(channel.items())
        # Cache the current channel list for future offline runs.
        self._write_channels_file(enabled_channels)
    repos = self.base.repos
    for (channel_id, channel_dict) in enabled_channels.items():
        cached_channel = cached_channels.get(channel_id)
        cached_version = None
        if cached_channel:
            cached_version = cached_channel.get('version')
        # Per-channel config: plugin defaults overridden by a matching
        # section in the plugin config file.
        conf = copy(self.conf)
        if channel_id in self.parser.sections():
            options = self.parser.items(channel_id)
            for (key, value) in options:
                conf._set_value(key, value, PRIO_PLUGINCONFIG)
        repo = SpacewalkRepo(channel_dict, {
            'conf': self.base.conf,
            'proxy': proxy_url,
            'timeout': conf.timeout,
            'sslcacert': sslcacert,
            'force_http': force_http,
            'cached_version': cached_version,
            'login_info': login_info,
            'gpgcheck': conf.gpgcheck,
            'enabled': conf.enabled,
        })
        repos.add(repo)
    # DEBUG
    logger.debug(enabled_channels)
def _out(self, msg):
    """Log *msg* at debug level with an "Etckeeper plugin" prefix."""
    logger.debug("Etckeeper plugin: %s", msg)
def _out(self, msg):
    """Log *msg* at debug level with an "Etckeeper plugin" prefix."""
    logger.debug('Etckeeper plugin: %s', msg)
def transaction(self):
    """After each dnf transaction, ask the nsbox host to re-export files."""
    logger.debug('Notifying nsbox host of updates...')
    command = ['nsbox-host', 'reload-exports']
    subprocess.run(command)
def resolved(self):
    """Hook run once the transaction is resolved; reports which
    languages are currently enabled."""
    if not alllangs:
        logger.debug("langpacks: No languages are enabled")
    else:
        logger.debug("langpacks: enabled languages are %s", alllangs)
def _out(msg):
    """Log *msg* at debug level with a "Ghost plugin" prefix."""
    logger.debug('Ghost plugin: %s', msg)
def __init__(self, base, cli):
    """Initialize the plugin instance.

    Builds the module-level ``alllangs`` list from (1) the current
    locale, (2) the langpack_locales option of langpacks.conf, and
    (3) the installed-langpacks file, then registers the langpack
    CLI subcommands.
    """
    self.base = base
    (lang, _) = locale.getdefaultlocale()
    # LANG=C returns (None, None). Set a default.
    if lang is None:
        lang = "en"
    if lang.endswith(".UTF-8"):
        lang = lang.split('.UTF-8')[0]
    # NOTE(review): str.find returns -1 (truthy) when '_' is absent and
    # 0 (falsy) when it is the first character, so this condition is
    # effectively "lang does not start with '_'" — confirm intent.
    if lang.find("_"):
        # Reduce e.g. "pt_BR" to "pt" unless the full locale is
        # explicitly whitelisted.
        if lang not in whitelisted_locales:
            lang = lang.split('_')[0]
    alllangs.append(lang)
    try:
        config = self.read_config(self.base.conf, "langpacks")
        try:
            conflist = config.get('main', 'langpack_locales')
            if conflist:
                tmp = conflist.split(",")
                for confitem in tmp:
                    confitem = confitem.strip()
                    shortlang = confitem.split('.UTF-8')[0]
                    if shortlang not in whitelisted_locales:
                        shortlang = confitem.split('_')[0]
                    logger.debug("Adding %s to language list", shortlang)
                    if shortlang not in alllangs:
                        alllangs.append(shortlang)
        except ini.NoSectionError:
            logger.debug(
                "langpacks: No main section defined in langpacks.conf")
        except ini.NoOptionError:
            logger.debug("langpacks: No languages are enabled")
    except ini.Error:
        logger.debug('langpacks.conf file could not be found')
    # Merge in languages recorded by previous langpack installs,
    # skipping comment lines.
    langc = LangpackCommon()
    llist = langc.read_installed_langpacks()
    for lang in llist:
        if not lang.startswith("#"):
            logger.debug("Adding %s to language list", lang)
            alllangs.append(lang)
    super(Langpacks, self).__init__(base, cli)
    if cli is not None:
        cli.register_command(LangavailableCommand)
        cli.register_command(LanginfoCommand)
        cli.register_command(LanglistCommand)
        cli.register_command(LanginstallCommand)
        cli.register_command(LangremoveCommand)
    logger.debug("initialized Langpacks plugin")
def transaction(self):
    """Keep the SWID tag store in sync with the finished transaction.

    Removes tags belonging to removed packages (matched by rpm header
    checksum), downloads tags from repo metadata for installed packages
    where available, and generates tags locally (rpm2swidtag) for the
    rest, verifying afterwards that each expected tag file appeared.
    """
    # Checksums of packages removed by this transaction.
    remove_packages = {}
    for p in self.remove_set:
        if p not in self.remove_set_checksum:
            logger.warning("Could not identify checksum for %s, potential SWID tag will not be removed", p)
            continue
        remove_packages[self.remove_set_checksum[p]] = True
    if len(remove_packages) > 0:
        # Delete tag files whose embedded checksum matches a removed rpm.
        for f in iglob(path.join(self.base.conf.installroot, SWIDTAGS_D,
                                 "*", "*-rpm-*.swidtag")):
            m = re.search(
                r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\.swidtag$', f)
            if not m:
                continue
            if m.group(1) in remove_packages:
                self.remove_file(f)
    # Per-repo Swidtags metadata handles (None = no metadata available).
    downloaded_swidtags = {}
    # Packages bucketed by repo; the None bucket collects packages whose
    # tags must be generated locally.
    packages_in_repos = {None: []}
    dirs = {}
    for i in self.install_set:
        try:
            checksum = self.get_nevra_checksum(str(i), verbose=False)
            if not checksum:
                logger.warning("No installed rpm found for package %s, will not sync SWID tag.", str(i))
                continue
            r = i.repo
            if r not in downloaded_swidtags:
                # Lazily open this repo's SWID metadata, if it has any.
                downloaded_swidtags[r] = None
                if hasattr(r, "get_metadata_path"):
                    file = r.get_metadata_path(self.METADATA_TYPE)
                    if file and file != "":
                        downloaded_swidtags[r] = repodata.Swidtags(
                            None, file)
            if downloaded_swidtags[r]:
                if r not in packages_in_repos:
                    packages_in_repos[r] = []
                packages_in_repos[r].append((i, checksum))
                continue
        except KeyError:
            pass
        # No repo metadata for this package: generate locally.
        packages_in_repos[None].append((i, checksum))
    for r in packages_in_repos:
        if not r:
            # Skip the local-generation bucket here.
            continue
        tags = downloaded_swidtags[r].tags_for_repo_packages(
            packages_in_repos[r])
        for p in tags:
            found = False
            for t in tags[p]:
                logger.debug("Retrieved SWID tag from repodata for %s: %s",
                             p[0], t.get_tagid())
                x = t.save_to_directory(self.dir_downloaded)
                dirs[x[0]] = True
                found = True
            if not found:
                # Repo metadata had no usable tag: fall back to local.
                packages_in_repos[None].append(p)
    for d in dirs:
        self.create_swidtags_d_symlink(path.basename(d))
    if len(packages_in_repos[None]) > 0:
        p_names = [str(p[0]) for p in packages_in_repos[None]]
        if self.run_rpm2swidtag_for(p_names) == 0:
            # Verify each requested tag was produced by matching the rpm
            # checksum embedded in the generated file names.
            pkgs_missing = {}
            for p in packages_in_repos[None]:
                pkgs_missing[p[1]] = p[0]
            for f in iglob(path.join(self.dir_generated,
                                     "*-rpm-*.swidtag")):
                m = re.search(
                    r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\.swidtag$', f)
                if m and m.group(1) in pkgs_missing:
                    del pkgs_missing[m.group(1)]
            for p in pkgs_missing.values():
                logger.warning("The SWID tag for rpm %s should have been generated but could not be found", str(p))
def _out(msg):
    """Log *msg* at debug level with a "Completion plugin" prefix."""
    logger.debug('Completion plugin: %s', msg)
def __init__(self, base, cli):
    """Register the builddep command when running under a CLI.

    :param base: dnf Base instance (unused here)
    :param cli: dnf CLI instance, or None when loaded through the API
    """
    if cli:
        cli.register_command(BuildDepCommand)
    logger.debug('initialized BuildDep plugin')
def activate_channels(self, networking=True):
    """Register a dnf repo for every Spacewalk channel the system is
    subscribed to.

    :param networking: when False, skip all server communication and use
                       the channel list cached in the persistdir
    """
    enabled_channels = {}
    sslcacert = None
    force_http = 0
    proxy_url = None
    login_info = None
    cached_channels = self._read_channels_file()
    if not networking:
        # no network communication, use list of channels from persistdir
        enabled_channels = cached_channels
    else:
        # setup proxy according to up2date
        self.up2date_cfg = up2date_client.config.initUp2dateConfig()
        sslcacert = get_ssl_ca_cert(self.up2date_cfg)
        # NOTE(review): the trailing comma makes force_http a 1-tuple,
        # not the config value itself — looks unintended (a tuple is
        # always truthy); confirm what SpacewalkRepo expects.
        force_http = self.up2date_cfg['useNoSSLForPackages'],
        try:
            login_info = up2date_client.up2dateAuth.getLoginInfo(
                timeout=self.conf.timeout)
        except up2dateErrors.RhnServerException as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        if not login_info:
            # Not registered: clear the channel cache and bail out.
            logger.error("%s\n%s", NOT_REGISTERED_ERROR, RHN_DISABLED)
            self._write_channels_file({})
            return
        try:
            svrChannels = up2date_client.rhnChannel.getChannelDetails(
                timeout=self.conf.timeout)
        except up2dateErrors.CommunicationError as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        except up2dateErrors.NoChannelsError:
            logger.error("%s\n%s", NOT_SUBSCRIBED_ERROR, CHANNELS_DISABLED)
            self._write_channels_file({})
            return
        except up2dateErrors.NoSystemIdError:
            logger.error("%s %s\n%s\n%s", NOT_SUBSCRIBED_ERROR,
                         NO_SYSTEM_ID_ERROR, USE_RHNREGISTER, RHN_DISABLED)
            return
        self.connected_to_spacewalk = True
        logger.info(UPDATES_FROM_SPACEWALK)
        for channel in svrChannels:
            if channel['version']:
                enabled_channels[channel['label']] = dict(channel.items())
        # Cache the current channel list for future offline runs.
        self._write_channels_file(enabled_channels)
    repos = self.base.repos
    for (channel_id, channel_dict) in enabled_channels.items():
        cached_channel = cached_channels.get(channel_id)
        cached_version = None
        if cached_channel:
            cached_version = cached_channel.get('version')
        # Per-channel config: plugin defaults overridden by a matching
        # section in the plugin config file.
        conf = copy(self.conf)
        if channel_id in self.parser.sections():
            options = self.parser.items(channel_id)
            for (key, value) in options:
                setattr(conf, key, value)
        repo = SpacewalkRepo(channel_dict, {
            'cachedir': self.base.conf.cachedir,
            'proxy': proxy_url,
            'timeout': conf.timeout,
            'sslcacert': sslcacert,
            'force_http': force_http,
            'cached_version': cached_version,
            'login_info': login_info,
            'gpgcheck': conf.gpgcheck,
            'enabled': conf.enabled,
        })
        repos.add(repo)
    # DEBUG
    logger.debug(enabled_channels)
def __init__(self, base, cli):
    """Remember the dnf base and CLI handles for later plugin hooks."""
    self.cli = cli
    self.base = base
    logger.debug('initialized Noroot plugin')