def add_repo(self): """ process --add-repo option """ # Get the reposdir location myrepodir = self.base.conf.get_reposdir for url in self.opts.add_repo: if dnf.pycomp.urlparse.urlparse(url).scheme == '': url = 'file://' + os.path.abspath(url) logger.info(_('Adding repo from: %s'), url) if url.endswith('.repo'): # .repo file - download, put into reposdir and enable it destname = os.path.basename(url) destname = os.path.join(myrepodir, destname) try: f = self.base.urlopen(url, mode='w+') shutil.copy2(f.name, destname) os.chmod(destname, 0o644) f.close() except IOError as e: logger.error(e) continue else: # just url to repo, create .repo file on our own repoid = sanitize_url_to_fs(url) reponame = 'created by dnf config-manager from %s' % url destname = os.path.join(myrepodir, "%s.repo" % repoid) content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \ (repoid, reponame, url) if not save_to_file(destname, content): continue
def add_repo(self):
    ''' process --add-repo option '''
    for url in self.opts.add_repo:
        # bare local paths are normalized into file:// URLs
        if dnf.pycomp.urlparse.urlparse(url).scheme == '':
            url = 'file://' + os.path.abspath(url)
        logger.info(_('Adding repo from: %s'), url)
        if url.endswith('.repo'):
            # .repo file - download, put into reposdir and enable it
            destname = os.path.basename(url)
            destname = os.path.join(self.repodir, destname)
            try:
                f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
                shutil.copy2(f.name, destname)
                os.chmod(destname, 0o644)
                f.close()
            except IOError as e:
                # failure on one URL does not abort the remaining ones
                logger.error(e)
                continue
        else:
            # just url to repo, create .repo file on our own
            # NOTE(review): repoid is built from the last two '/'-separated
            # URL components and raises IndexError for URLs with fewer than
            # two parts -- confirm inputs are validated upstream
            repoid = '%s-%s' % (url.split('/')[-2], url.split('/')[-1])
            # NOTE(review): every plain-URL repo is appended ('a+') to the
            # single hard-coded file "lait.spec" -- confirm this is intended
            destname = os.path.join(self.repodir, "lait.spec")
            content = "%s %s\n" % (repoid, url)
            if not save_to_file(destname, content, 'a+'):
                continue
def read_dump_file(filename):
    """Parse a dnf debug dump file into a dict of packages to install.

    filename -- path to a dump file, optionally gzip-compressed (".gz")
    Returns a dict keyed by (name, arch) whose values are
    ["install", name, arch, epoch, version, release] lists.
    Raises dnf.exceptions.Error when the version header does not match.
    """
    if filename.endswith(".gz"):
        fobj = gzip.GzipFile(filename)
    else:
        fobj = open(filename)
    try:
        if ucd(fobj.readline()) != DEBUG_VERSION:
            logger.error(_("Bad dnf debug file: %s"), filename)
            # raise an instance with a message instead of the bare class
            raise dnf.exceptions.Error(_("Bad dnf debug file: %s") % filename)
        skip = True
        pkgs = {}
        for line in fobj:
            line = ucd(line)
            if skip:
                # skip everything up to the RPMDB section marker
                if line == "%%%%RPMDB\n":
                    skip = False
                continue
            # package entries are indented; anything else ends the section
            if not line or line[0] != " ":
                break
            pkg_spec = line.strip()
            nevra = hawkey.split_nevra(pkg_spec)
            pkgs[(nevra.name, nevra.arch)] = ["install", ucd(nevra.name),
                                              ucd(nevra.arch),
                                              ucd(nevra.epoch),
                                              ucd(nevra.version),
                                              ucd(nevra.release)]
        return pkgs
    finally:
        # the original leaked the file object on every path
        fobj.close()
def add_repo(self): """ process --add-repo option """ # Get the reposdir location myrepodir = dnfpluginscore.lib.get_reposdir(self) for url in self.opts.add_repo: if dnf.pycomp.urlparse.urlparse(url).scheme == '': url = 'file://' + os.path.abspath(url) logger.info(_('Adding repo from: %s'), url) if url.endswith('.repo'): # .repo file - download, put into reposdir and enable it destname = os.path.basename(url) destname = os.path.join(myrepodir, destname) try: f = dnfpluginscore.lib.urlopen(self, None, url, 'w+') shutil.copy2(f.name, destname) os.chmod(destname, 0o644) f.close() except IOError as e: logger.error(e) continue else: # just url to repo, create .repo file on our own repoid = sanitize_url_to_fs(url) reponame = 'created by dnf config-manager from %s' % url destname = os.path.join(myrepodir, "%s.repo" % repoid) content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \ (repoid, reponame, url) if not save_to_file(destname, content): continue
def run(self): rpmlog = dnf.yum.rpmtrans.RPMTransaction(self.base) # Push user-supplied macro definitions for spec parsing for macro in self.opts.define: rpm.addMacro(macro[0], macro[1]) pkg_errors = False for pkgspec in self.opts.packages: pkgspec = self._download_remote_file(pkgspec) try: if self.opts.srpm: self._src_deps(pkgspec) elif self.opts.spec: self._spec_deps(pkgspec) elif pkgspec.endswith('.src.rpm') or pkgspec.endswith('nosrc.rpm'): self._src_deps(pkgspec) elif pkgspec.endswith('.spec'): self._spec_deps(pkgspec) else: self._remote_deps(pkgspec) except dnf.exceptions.Error as e: for line in rpmlog.messages(): logger.error(_("RPM: {}").format(line)) logger.error(e) pkg_errors = True # Pop user macros so they don't affect future rpm calls for macro in self.opts.define: rpm.delMacro(macro[0]) if pkg_errors: raise dnf.exceptions.Error(_("Some packages could not be found."))
def load_input_files(self):
    """
    Loads all input xml files.
    Returns True if at least one file was successfuly loaded
    (NOTE(review): no explicit return statement exists -- confirm the
    documented return value against callers)
    """
    for file_name in self.opts.load:
        file_comps = libcomps.Comps()
        try:
            if file_name.endswith('.gz'):
                # libcomps does not support gzipped files - decompress to
                # temporary location
                with gzip.open(file_name) as gz_file:
                    temp_file = tempfile.NamedTemporaryFile(delete=False)
                    try:
                        shutil.copyfileobj(gz_file, temp_file)
                        # close temp_file to ensure the content is flushed
                        # to disk
                        temp_file.close()
                        file_comps.fromxml_f(temp_file.name)
                    finally:
                        os.unlink(temp_file.name)
            else:
                file_comps.fromxml_f(file_name)
        except (IOError, OSError, libcomps.ParserError) as err:
            # gzip module raises OSError on reading from malformed gz file
            # get_last_errors() output often contains duplicit lines, remove
            # them; dedupe on the stripped text -- the original keyed the
            # 'seen' set on the unstripped string while logging the stripped
            # one, so entries differing only in whitespace were logged twice
            seen = set()
            for error in file_comps.get_last_errors():
                stripped = error.strip()
                if stripped in seen:
                    continue
                logger.error(stripped)
                seen.add(stripped)
            raise dnf.exceptions.Error(
                _("Can't load file \"{}\": {}").format(file_name, err))
        else:
            self.comps += file_comps
def _src_deps(self, src_fn):
    """Install every build dependency declared by a source rpm.

    src_fn -- path to a .src.rpm / .nosrc.rpm file
    Raises dnf.exceptions.Error when the file is not a valid source rpm
    or some dependency cannot be satisfied.
    """
    fd = os.open(src_fn, os.O_RDONLY)
    try:
        if self.cli.nogpgcheck:
            self.rpm_ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
        try:
            h = self.rpm_ts.hdrFromFdno(fd)
        except rpm.error as e:
            if str(e) == 'public key not available':
                logger.error("Error: public key not available, add "
                             "'--nogpgcheck' option to ignore package sign")
                return
            elif str(e) == 'error reading package header':
                e = _("Failed to open: '%s', not a valid source rpm file.") % (
                    src_fn,)
            raise dnf.exceptions.Error(e)
    finally:
        # the original leaked the descriptor on the early return and on the
        # raise path; close it on every path
        os.close(fd)
    ds = h.dsFromHeader('requirename')
    done = True
    for dep in ds:
        reldep_str = self._rpm_dep2reldep_str(dep)
        # rpmlib() entries are internal rpm capabilities, not packages
        if reldep_str.startswith('rpmlib('):
            continue
        done &= self._install(reldep_str)
    if not done:
        err = _("Not all dependencies satisfied")
        raise dnf.exceptions.Error(err)
def run(self):
    """Resolve and install build dependencies for each requested spec.

    Raises dnf.exceptions.Error when any spec could not be processed.
    """
    # Push user-supplied macro definitions for spec parsing
    for macro in self.opts.define:
        rpm.addMacro(macro[0], macro[1])
    pkg_errors = False
    for pkgspec in self.opts.packages:
        try:
            if self.opts.srpm:
                self._src_deps(pkgspec)
            elif self.opts.spec:
                self._spec_deps(pkgspec)
            elif pkgspec.endswith('.src.rpm') or pkgspec.endswith('nosrc.rpm'):
                self._src_deps(pkgspec)
            elif pkgspec.endswith('.spec'):
                self._spec_deps(pkgspec)
            else:
                self._remote_deps(pkgspec)
        except dnf.exceptions.Error as e:
            logger.error(e)
            pkg_errors = True
    # Pop user macros so they don't affect future rpm calls
    for macro in self.opts.define:
        rpm.delMacro(macro[0])
    if pkg_errors:
        raise dnf.exceptions.Error(_("Some packages could not be found."))
def process_dump(self, dump_pkgs, opts):
    """Turn the parsed debug-dump package dict into install requests.

    dump_pkgs -- dict as produced by read_dump_file()
    opts -- parsed options (ignore_arch, install_latest, filter_types,
            output)
    """
    for (action, n, a, e, v, r) in sorted(dump_pkgs.values()):
        filtered = False
        if opts.ignore_arch:
            arch = ""
        else:
            arch = "." + a
        if opts.install_latest and action == "install":
            # name(.arch) only -> let the resolver pick the latest version
            pkg_spec = "%s%s" % (n, arch)
            if "install" not in opts.filter_types:
                filtered = True
        else:
            # full spec pins the exact recorded version
            pkg_spec = pkgtup2spec(n, arch, e, v, r)
            if (action == "replace" and "replace" not in opts.filter_types):
                filtered = True
        if not filtered:
            if opts.output:
                # dry run: print what would be done instead of resolving
                print("install %s" % pkg_spec)
            else:
                try:
                    self.base.install(pkg_spec)
                except dnf.exceptions.MarkingError:
                    logger.error(_("Package %s is not available"), pkg_spec)
def read_dump_file(filename):
    """Parse a dnf debug dump file into a dict of packages to install.

    filename -- path to a dump file, optionally gzip-compressed (".gz")
    Returns a dict keyed by (name, arch) whose values are
    ["install", name, arch, epoch, version, release] lists.
    Raises dnf.exceptions.Error when the version header does not match.
    """
    if filename.endswith(".gz"):
        fobj = gzip.GzipFile(filename)
    else:
        fobj = open(filename)
    try:
        if ucd(fobj.readline()) != DEBUG_VERSION:
            logger.error(_("Bad dnf debug file: %s"), filename)
            # raise an instance with a message instead of the bare class
            raise dnf.exceptions.Error(_("Bad dnf debug file: %s") % filename)
        skip = True
        pkgs = {}
        for line in fobj:
            line = ucd(line)
            if skip:
                # skip everything up to the RPMDB section marker
                if line == "%%%%RPMDB\n":
                    skip = False
                continue
            # package entries are indented; anything else ends the section
            if not line or line[0] != " ":
                break
            pkg_spec = line.strip()
            nevra = hawkey.split_nevra(pkg_spec)
            pkgs[(nevra.name, nevra.arch)] = [
                "install", ucd(nevra.name),
                ucd(nevra.arch),
                ucd(nevra.epoch),
                ucd(nevra.version),
                ucd(nevra.release)
            ]
        return pkgs
    finally:
        # the original leaked the file object on every path
        fobj.close()
def save_to_file(filename, content):
    """Write *content* to *filename* and make it world-readable (0644).

    Returns True on success; on I/O failure logs the error and returns
    False.
    """
    ok = True
    try:
        with open(filename, 'w+') as repo_fd:
            dnf.pycomp.write_to_file(repo_fd, content)
            os.chmod(filename, 0o644)
    except (IOError, OSError) as exc:
        logger.error(_('Could not save repo to repofile %s: %s'),
                     filename, exc)
        ok = False
    return ok
def save_to_file(filename, content):
    """Write *content* to *filename*.

    Returns True on success; on I/O failure logs the error and returns
    False.
    """
    try:
        with open(filename, 'w+') as fd:
            dnf.pycomp.write_to_file(fd, content)
            # make the file world readable, consistent with the other
            # save_to_file variants in this file (the original omitted this)
            os.chmod(filename, 0o644)
    except (IOError, OSError) as e:
        logger.error(_('Could not save repo to repofile %s: %s'), filename, e)
        return False
    return True
def getBoardDetailsFromTemplate():
    """Gets Board Name From /etc/rbf/board.xml

    Returns a (board_name, linux_distro) tuple; falls back to
    ("generic", "Linux") when the template is missing or malformed.
    """
    try:
        boardDom = parse("/etc/rbf/board.xml")
        boardName = boardDom.getElementsByTagName("board")[0].firstChild.data
        linuxDistro = boardDom.getElementsByTagName("distro")[0].firstChild.data
        return (boardName, linuxDistro)
    except (ExpatError, IndexError):
        # malformed XML or missing <board>/<distro> elements
        logger.error("Bad Board Template")
        return ("generic", "Linux")
def sack(self):
    """Apply versionlock rules from the lock list as sack excludes.

    Rules starting with '!' exclude matching packages; all other rules
    lock a package name to the listed version(s) by excluding every
    other version. Ignored packages are reported on stdout.
    """
    if not self.cli.demands.resolving:
        # nothing will be resolved in this run, no point applying locks
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        excl = 0
        # a leading '!' turns the pattern into an exclude rule
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(
            pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        # hide all versions of locked names other than the locked ones
        all_versions = self.base.sack.query().filter(
            name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        # report what is being hidden (deduplicated by string form)
        excluded = list(
            set([
                str(x) for x in other_versions.difference(excludes_query)
            ]))
        if excluded:
            print("Available packages ignored because of versionlock: ")
            for pkg in excluded:
                print(" ", pkg)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def _get_packages(self, pkg_specs, source=False):
    """Get packages matching pkg_specs.

    Specs that match nothing are logged and skipped; the result is a
    flat list of the packages from every matching query.
    """
    lookup = self._get_query_source if source else self._get_query
    matched_queries = []
    for spec in pkg_specs:
        try:
            matched_queries.append(lookup(spec))
        except dnf.exceptions.PackageNotFoundError as err:
            logger.error(dnf.i18n.ucd(err))
    return list(itertools.chain.from_iterable(matched_queries))
def save_to_file(filename, content, operate='w+'):
    ''' Save repo to file

    filename -- destination path
    content  -- text to write
    operate  -- open() mode, e.g. 'w+' to overwrite or 'a+' to append
    Returns True on success, False (after logging) on I/O failure.
    '''
    try:
        fd = open(filename, operate)
        try:
            dnf.pycomp.write_to_file(fd, content)
        finally:
            fd.close()
        # keep repo files world readable
        os.chmod(filename, 0o644)
    except (IOError, OSError) as e:
        logger.error(_('Could not save repo to repofile %s: %s'), filename, e)
        return False
    return True
def __init__(self, cli):
    """Locate the swidtags plugin instance this command cooperates with."""
    super(swidtagsCommand, self).__init__(cli)
    # waiting for API: https://bugzilla.redhat.com/show_bug.cgi?id=1678176
    # pylint: disable=protected-access
    # The original never initialized self.plugin, so when no matching
    # plugin was found the 'if not self.plugin' check below raised
    # AttributeError instead of logging the error.
    self.plugin = None
    for p in self.base._plugins.plugins:
        if p.name == NAME:
            self.plugin = p
            break
    if not self.plugin:
        logger.error("Internal error: cannot find the plugin from command.")
        return
def run(self):
    """Execute the util action here."""
    # enabling without naming any repo is a no-op; warn and drop the flag
    if self.opts.set_enabled and not self.opts.crepo:
        logger.error(_("Error: Trying to enable already enabled repos."))
        self.opts.set_enabled = False
    action = self.add_repo if self.opts.add_repo else self.modify_repo
    action()
def sack(self):
    """Apply versionlock rules from the lock list as sack excludes.

    Rules starting with '!' exclude matching packages; other rules lock
    a name to the listed version(s). Obsoleters of locked versions are
    excluded too, and installed (@System) packages are never excluded.
    """
    if not self.locking_enabled():
        logger.debug(NO_VERSIONLOCK)
        return
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        excl = 0
        # a leading '!' turns the pattern into an exclude rule
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(
            pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        # hide all versions of locked names other than the locked ones
        all_versions = self.base.sack.query().filter(
            name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    # exclude also anything that obsoletes the locked versions of packages
    obsoletes_query = self.base.sack.query().filterm(
        obsoletes=locked_query)
    # leave out obsoleters that are also part of locked versions (otherwise the obsoleter package
    # would not be installable at all)
    excludes_query = excludes_query.union(
        obsoletes_query.difference(locked_query))
    # never exclude already-installed packages
    excludes_query.filterm(reponame__neq=hawkey.SYSTEM_REPO_NAME)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def getcomps(self):
    """Save comps.xml for every enabled repository that has group data."""
    for repo in self.base.repos.iter_enabled():
        comps_fn = repo.metadata._comps_fn
        if comps_fn:
            if not os.path.exists(repo.pkgdir):
                try:
                    os.makedirs(repo.pkgdir)
                except IOError:
                    logger.error(_("Could not make repository directory: %s"),
                                 repo.pkgdir)
                    sys.exit(1)
            dest = os.path.join(self._repo_base_path[repo.id], 'comps.xml')
            # the stored comps file may be compressed in the metadata
            dnf.yum.misc.decompress(comps_fn, dest=dest)
            logger.info(_("comps.xml for repository %s saved"), repo.id)
def transaction(self):
    """Upload a Tracer profile after the transaction, when enabled.

    The upload is best-effort: failures are only logged, and even the
    logging can be turned off via the 'supress_errors' option.
    """
    conf = self.read_config(self.base.conf)
    enabled = (conf.has_section('main')
               and conf.has_option('main', 'enabled')
               and conf.getboolean('main', 'enabled'))
    if enabled:
        # 'supress_*' spellings are existing config keys, kept as-is
        if (conf.has_option('main', 'supress_debug')
                and not conf.getboolean('main', 'supress_debug')):
            logger.info("Uploading Tracer Profile")
        try:
            upload_tracer_profile(query_apps, self)
        except Exception:
            if (conf.has_option('main', 'supress_errors')
                    and not conf.getboolean('main', 'supress_errors')):
                logger.error("Unable to upload Tracer Profile")
def delete_old_local_packages(self, repo, pkglist):
    """Remove local rpm files that are not part of the download set.

    repo -- repository whose target directory is cleaned
    pkglist -- packages that were (or will be) downloaded; their target
               paths are kept
    """
    # delete any *.rpm file under target path, that was not downloaded from repository
    downloaded_files = set(self.pkg_download_path(pkg) for pkg in pkglist)
    for dirpath, dirnames, filenames in os.walk(self.repo_target(repo)):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            if filename.endswith('.rpm') and os.path.isfile(path):
                if path not in downloaded_files:
                    # Delete disappeared or relocated file
                    try:
                        os.unlink(path)
                        logger.info(_("[DELETED] %s"), path)
                    except OSError:
                        logger.error(_("failed to delete file %s"), path)
def run(self):
    """Upload the rhsm package profile and report the outcome."""
    try:
        init_dep_injection()
    except ImportError as err:
        logger.error(str(err))
        return
    report = packageprofilelib.PackageProfileActionCommand().perform(
        force_upload=self.opts.force_upload)
    if report._status == 0:
        print(_("No updates performed. See /var/log/rhsm/rhsm.log for more information."))
    else:
        print(report)
def transaction(self):
    """ Update system's profile after transaction. """
    if not self.connected_to_spacewalk:
        # not connected so nothing to do here
        return
    if self.up2date_cfg['writeChangesToLog'] == 1:
        delta = self._make_package_delta()
        up2date_client.rhnPackageInfo.logDeltaPackages(delta)
    try:
        up2date_client.rhnPackageInfo.updatePackageProfile(
            timeout=self.timeout)
    except up2dateErrors.RhnServerException as e:
        # 'unicode' does not exist on Python 3 (NameError); pass the
        # exception directly, like the other transaction() variant in
        # this file does -- logging formats it lazily with %s anyway
        logger.error("%s\n%s\n%s", COMMUNICATION_ERROR,
                     PROFILE_NOT_SENT, e)
def sack(self):
    """Apply versionlock rules from the lock list as sack excludes.

    Each pattern must parse as a full NEVRA; only the first parsed form
    is used. Patterns starting with '!' exclude matching packages,
    others lock the name to the listed version.
    """
    if not self.cli.demands.resolving:
        # nothing will be resolved in this run, no point applying locks
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    excluded_count = 0
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    locked_count = 0
    for pat in _read_locklist():
        excl = False
        # a leading '!' turns the pattern into an exclude rule
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = True
        subj = dnf.subject.Subject(pat)
        possible_nevras = list(
            subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA]))
        if not possible_nevras:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        # only the first (full NEVRA) interpretation is considered
        nevra = possible_nevras[0]
        pat_query = nevra.to_query(self.base.sack)
        if excl:
            excluded_count += 1
            excludes_query = excludes_query.union(pat_query)
        else:
            locked_count += 1
            locked_names.add(nevra.name)
            locked_query = locked_query.union(pat_query)
    if excluded_count:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, excluded_count))
    if locked_count:
        logger.debug(APPLY_LOCK.format(locklist_fn, locked_count))
    if locked_names:
        # hide all versions of locked names other than the locked ones
        all_versions = self.base.sack.query().filter(
            name=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def getcomps(self):
    """Save comps.xml for every enabled repository that has group data."""
    for repo in self.base.repos.iter_enabled():
        comps_fn = repo.metadata._comps_fn
        if comps_fn:
            if not os.path.exists(repo.pkgdir):
                try:
                    os.makedirs(repo.pkgdir)
                except IOError:
                    logger.error(
                        _("Could not make repository directory: %s"),
                        repo.pkgdir)
                    sys.exit(1)
            dest = os.path.join(self._repo_base_path[repo.id], 'comps.xml')
            # the stored comps file may be compressed in the metadata
            dnf.yum.misc.decompress(comps_fn, dest=dest)
            logger.info(_("comps.xml for repository %s saved"), repo.id)
def _get_packages(self, pkg_specs, source=False):
    """Get packages matching pkg_specs.

    Missing specs are logged and skipped, unless the 'strict'
    configuration option is set, in which case an Error is raised.
    """
    func = self._get_query_source if source else self._get_query
    queries = []
    for pkg_spec in pkg_specs:
        try:
            queries.append(func(pkg_spec))
        except dnf.exceptions.PackageNotFoundError as e:
            logger.error(dnf.i18n.ucd(e))
            # with strict enabled a single missing spec is fatal
            if self.base.conf.strict:
                logger.error(_("Exiting due to strict setting."))
                raise dnf.exceptions.Error(e)
    pkgs = list(itertools.chain(*queries))
    return pkgs
def transaction(self):
    """ Update system's profile after transaction. """
    if not self.conf.enabled:
        return
    if not self.connected_to_spacewalk:
        # not connected so nothing to do here
        return
    if self.up2date_cfg['writeChangesToLog'] == 1:
        delta = self._make_package_delta()
        up2date_client.rhnPackageInfo.logDeltaPackages(delta)
    try:
        up2date_client.rhnPackageInfo.updatePackageProfile(
            timeout=self.conf.timeout)
    except up2dateErrors.RhnServerException as e:
        # profile upload is best-effort; log and continue
        logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, PROFILE_NOT_SENT, e)
def run(self):
    """Upload the rhsm package profile and report the outcome."""
    try:
        init_dep_injection()
    except ImportError as e:
        logger.error(str(e))
        return
    command = packageprofilelib.PackageProfileActionCommand()
    report = command.perform(force_upload=self.opts.force_upload)
    # _status == 0 means the server already has an up-to-date profile
    if report._status == 0:
        print(
            _("No updates performed. See /var/log/rhsm/rhsm.log for more information."
              ))
    else:
        print(report)
def delete_old_local_packages(self, packages_to_download):
    """Remove local rpm files that will not be (re)downloaded."""
    download_map = dict()
    for pkg in packages_to_download:
        # the value is unused; the dict is only a (repo id, filename) set
        download_map[(pkg.repo.id, os.path.basename(pkg.location))] = 1
    # delete any *.rpm file, that is not going to be downloaded from repository
    for repo in self.base.repos.iter_enabled():
        if os.path.exists(repo.pkgdir):
            for filename in os.listdir(repo.pkgdir):
                path = os.path.join(repo.pkgdir, filename)
                if filename.endswith('.rpm') and os.path.isfile(path):
                    if not (repo.id, filename) in download_map:
                        try:
                            os.unlink(path)
                            logger.info(_("[DELETED] %s"), path)
                        except OSError:
                            logger.error(_("failed to delete file %s"), path)
def save_output_files(self):
    """Serialize the merged comps into every requested output file.

    Raises dnf.exceptions.Error on any generation failure.
    """
    for file_name in self.opts.save:
        try:
            # xml_f returns a list of errors / log entries
            errors = self.comps.xml_f(file_name, xml_options=COMPS_XML_OPTIONS)
        except libcomps.XMLGenError as err:
            # NOTE(review): err is passed to .strip() below, so it is
            # presumably string-like -- confirm against libcomps docs
            errors = [err]
        if errors:
            # xml_f() method could return more than one error. In this case
            # raise the latest of them and log the others.
            for err in errors[:-1]:
                logger.error(err.strip())
            raise dnf.exceptions.Error(
                _("Can't save file \"{}\": {}").format(
                    file_name, errors[-1].strip()))
def sack(self):
    """Apply versionlock rules from the lock list as sack excludes.

    When used through the API (no cli) the locks are always applied;
    on the command line they are only applied when resolving happens.
    """
    if self.cli is None:
        pass  # loaded via the api, not called by cli
    elif not self.cli.demands.resolving:
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    # counter of applied rules [locked_count, excluded_count]
    count = [0, 0]
    for pat in _read_locklist():
        excl = 0
        # a leading '!' turns the pattern into an exclude rule
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = 1
        possible_nevras = dnf.subject.Subject(pat).get_nevra_possibilities()
        if possible_nevras:
            count[excl] += 1
        else:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        for nevra in possible_nevras:
            pat_query = nevra.to_query(self.base.sack)
            if excl:
                excludes_query = excludes_query.union(pat_query)
            else:
                locked_names.add(nevra.name)
                locked_query = locked_query.union(pat_query)
    if count[1]:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, count[1]))
    if count[0]:
        logger.debug(APPLY_LOCK.format(locklist_fn, count[0]))
    if locked_names:
        # hide all versions of locked names other than the locked ones
        all_versions = self.base.sack.query().filter(name__glob=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def sack(self):
    """Apply versionlock rules from the lock list as sack excludes.

    Each pattern must parse as a full NEVRA; only the first parsed form
    is used. Patterns starting with '!' exclude matching packages,
    others lock the name to the listed version.
    """
    if not self.cli.demands.resolving:
        # nothing will be resolved in this run, no point applying locks
        logger.debug(NO_VERSIONLOCK)
        return
    if not locklist_fn:
        raise dnf.exceptions.Error(NO_LOCKLIST)
    excludes_query = self.base.sack.query().filter(empty=True)
    excluded_count = 0
    locked_query = self.base.sack.query().filter(empty=True)
    locked_names = set()
    locked_count = 0
    for pat in _read_locklist():
        excl = False
        # a leading '!' turns the pattern into an exclude rule
        if pat and pat[0] == '!':
            pat = pat[1:]
            excl = True
        subj = dnf.subject.Subject(pat)
        possible_nevras = list(subj.get_nevra_possibilities(forms=[hawkey.FORM_NEVRA]))
        if not possible_nevras:
            logger.error("%s %s", NEVRA_ERROR, pat)
            continue
        # only the first (full NEVRA) interpretation is considered
        nevra = possible_nevras[0]
        pat_query = nevra.to_query(self.base.sack)
        if excl:
            excluded_count += 1
            excludes_query = excludes_query.union(pat_query)
        else:
            locked_count += 1
            locked_names.add(nevra.name)
            locked_query = locked_query.union(pat_query)
    if excluded_count:
        logger.debug(APPLY_EXCLUDE.format(locklist_fn, excluded_count))
    if locked_count:
        logger.debug(APPLY_LOCK.format(locklist_fn, locked_count))
    if locked_names:
        # hide all versions of locked names other than the locked ones
        all_versions = self.base.sack.query().filter(name=list(locked_names))
        other_versions = all_versions.difference(locked_query)
        excludes_query = excludes_query.union(other_versions)
    if excludes_query:
        self.base.sack.add_excludes(excludes_query)
def _diff_package(self, package, package_name, files):
    """Run the external rpm-diff helper for each requested file.

    package -- package object providing name/version/release/arch/files
    package_name -- display name used in error messages
    files -- file paths (within the package) to diff
    """
    for fname in files:
        if fname not in package.files:
            # Should not happen?
            # Translate first, then substitute: the original passed an
            # already-formatted string to _(), so the filled-in message
            # could never match a translation catalog entry.
            logger.error(
                _("file '{0}' not found in '{1}'").format(
                    fname, package_name))
            continue
        rpm_file_name = '{name}-{version}-{release}.{arch}.rpm'.format(
            name=package.name, version=package.version,
            release=package.release, arch=package.arch)
        subprocess.call(
            ['/usr/libexec/dnf-diff-rpm-filename', rpm_file_name, fname])
def run(self, _args):
    """Execute the util action here."""
    if self.opts.help_cmd:
        return
    opts = self.opts
    # contradictory flags cancel each other out
    if opts.set_enabled and opts.set_disabled:
        logger.error(
            _("Error: Trying to enable and disable repos at the same time."))
        opts.set_enabled = opts.set_disabled = False
    # enabling without naming any repo is a no-op; warn and drop the flag
    if opts.set_enabled and not opts.repo:
        logger.error(_("Error: Trying to enable already enabled repos."))
        opts.set_enabled = False
    (self.add_repo if opts.add_repo else self.modify_repo)()
def _get_packages_with_deps(self, pkg_specs, source=False):
    """Get packages matching pkg_specs and the deps.

    Each matched package is resolved individually; the result set also
    contains everything its install goal would pull in.
    Raises dnf.exceptions.Error when any goal fails to resolve.
    """
    pkgs = self._get_packages(pkg_specs)
    pkg_set = set(pkgs)
    for pkg in pkgs:
        goal = hawkey.Goal(self.base.sack)
        goal.install(pkg)
        rc = goal.run()
        if rc:
            pkg_set.update(goal.list_installs())
            pkg_set.update(goal.list_upgrades())
        else:
            # NOTE(review): the message lists all matched packages, not
            # just the one whose goal failed -- confirm this is intended
            msg = [_('Error in resolve of packages:')]
            logger.error("\n ".join(msg + [str(pkg) for pkg in pkgs]))
            logger.error(
                dnf.util._format_resolve_problems(goal.problem_rules()))
            raise dnf.exceptions.Error()
    return pkg_set
def transaction(self):
    """Upload the enabled-repositories report after a transaction.

    Only runs when the plugin is enabled in its configuration; the
    upload is best-effort and failures are merely logged (and even that
    can be turned off via 'supress_errors').
    """
    conf = self.read_config(self.base.conf)
    enabled = (conf.has_section('main')
               and conf.has_option('main', 'enabled')
               and conf.getboolean('main', 'enabled'))
    if enabled is True:
        # 'supress_*' spellings are existing config keys, kept as-is
        if (conf.has_option('main', 'supress_debug')
                and not conf.getboolean('main', 'supress_debug')):
            logger.info("Uploading Enabled Repositories Report")
        try:
            report = EnabledReport(REPOSITORY_PATH)
            upload_enabled_repos_report(report)
        except Exception:
            # the original bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception instead
            if (conf.has_option('main', 'supress_errors')
                    and not conf.getboolean('main', 'supress_errors')):
                logger.error(
                    "Unable to upload Enabled Repositories Report")
def delete_old_local_packages(self, packages_to_download):
    """Remove local rpm files that disappeared from, or moved within,
    their repository."""
    download_map = dict()
    for pkg in packages_to_download:
        # map (repo id, filename) to the expected relative location
        download_map[(pkg.repo.id, os.path.basename(pkg.location))] = pkg.location
    # delete any *.rpm file, that is not going to be downloaded from repository
    for repo in self.base.repos.iter_enabled():
        repo_target = self.repo_target(repo)
        for dirpath, dirnames, filenames in os.walk(repo_target):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                if filename.endswith('.rpm') and os.path.isfile(path):
                    location = download_map.get((repo.id, filename))
                    if location is None or os.path.join(repo_target, location) != path:
                        # Delete disappeared or relocated file
                        try:
                            os.unlink(path)
                            logger.info(_("[DELETED] %s"), path)
                        except OSError:
                            logger.error(_("failed to delete file %s"), path)
def config(self):
    """ update

    Refresh Subscription Management repositories (root only) and emit
    the usual expiry/usage warnings; all errors are logged, not raised.
    """
    logutil.init_logger_for_yum()
    init_dep_injection()
    chroot(self.base.conf.installroot)
    cfg = config.initConfig()
    # full_refresh_on_yum=1 forces fresh metadata instead of cache-only
    cache_only = not bool(cfg.get_int('rhsm', 'full_refresh_on_yum'))
    try:
        if os.getuid() == 0:
            self._update(cache_only)
            self._warnOrGiveUsageMessage()
        else:
            logger.info(_('Not root, Subscription Management repositories not updated'))
        self._warnExpired()
    except Exception as e:
        logger.error(str(e))
def checkCommandExistsAccess(commandList):
    """Checks if commands in the command list are present in the system &
    are executable

    commandList -- list of bare command names (searched on $PATH)
    Returns True when every command is found and executable; otherwise
    logs the offenders and returns False.
    """
    osPathList = os.environ["PATH"].split(":")
    notFoundList = []
    notExecList = []
    for command in commandList:
        # first PATH entry containing the command decides, as before
        for path in osPathList:
            fullPath = os.path.join(path, command)
            if os.path.exists(fullPath):
                if not os.access(fullPath, os.X_OK):
                    notExecList.append(command)
                break
        else:
            notFoundList.append(command)
    # str.join replaces the original manual "x, y, " build + strip of the
    # trailing two characters
    if notFoundList:
        logger.error("Commands Not Found: " + ", ".join(notFoundList))
    if notExecList:
        logger.error("Commands Not Executable: " + ", ".join(notExecList))
    return not notFoundList and not notExecList
def transaction(self):
    """ Update product ID certificates. """
    if len(self.base.transaction) == 0:
        # nothing to update after empty transaction
        return
    try:
        init_dep_injection()
    except ImportError as e:
        logger.error(str(e))
        return
    logutil.init_logger_for_yum()
    chroot(self.base.conf.installroot)
    try:
        pm = DnfProductManager(self.base)
        pm.update_all()
        logger.info(_('Installed products updated.'))
    except Exception as e:
        # cert update is best-effort after a transaction; log and continue
        logger.error(str(e))
def transaction(self):
    """Post Transaction Hook

    Logs every installed package; when a kernel-core package was
    installed, runs the board-specific "rbf<board>.sh" kernel-update
    script with the distro string, kernel version and root path.
    """
    for item in self.base.transaction:
        installedPacks = item.installs()
        for pack in installedPacks:
            logger.info("Installed: " + pack.name +
                        " Version: " + pack.version +
                        " Release: " + pack.release +
                        " Arch: " + pack.arch)
            if pack.name.startswith("kernel-core"):
                # get board details
                (boardName, linuxDistro) = getBoardDetailsFromTemplate()
                kernelUpScript = "rbf" + boardName + ".sh"
                if not checkCommandExistsAccess([kernelUpScript]):
                    logger.error("Please fix boot configuration manually")
                    return
                # determine distro name
                try:
                    redhatReleaseFile = open("/etc/redhat-release", "r")
                    redhatRelease = redhatReleaseFile.readlines()[0].strip()
                    redhatReleaseFile.close()
                except IOError:
                    # fall back to the distro recorded in the board template
                    redhatRelease = linuxDistro
                # determine new kernel version
                kernelString = pack.version + "-" + pack.release + "." + \
                               pack.arch
                # determine root path
                rootPath = getRootPathFromProc()
                if rootPath == None:
                    logger.error("Could not find path to / in " +
                                 "/proc/cmdline. Please fix boot " +
                                 "configuration manually")
                    return
                logger.info("Executing kernelup script for " + boardName)
                kernelupRet = subprocess.call([kernelUpScript,
                                               redhatRelease, kernelString,
                                               rootPath])
                if kernelupRet != 0:
                    logger.error("Error Execuing Kernel Up Script for " +
                                 boardName)
def activate_channels(self, networking=True):
    """Register the enabled Spacewalk channels as dnf repositories.

    networking -- when False, skip all server communication and use the
                  channel list cached in persistdir
    """
    enabled_channels = {}
    sslcacert = None
    force_http = 0
    proxy_url = None
    login_info = None
    cached_channels = self._read_channels_file()
    if not networking:
        # no network communication, use list of channels from persistdir
        enabled_channels = cached_channels
    else:
        # setup proxy according to up2date
        self.up2date_cfg = up2date_client.config.initUp2dateConfig()
        sslcacert = get_ssl_ca_cert(self.up2date_cfg)
        # the original had a trailing comma here, which turned force_http
        # into a 1-tuple instead of the configuration value
        force_http = self.up2date_cfg['useNoSSLForPackages']
        try:
            login_info = up2date_client.up2dateAuth.getLoginInfo(
                timeout=self.conf.timeout)
        except up2dateErrors.RhnServerException as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        if not login_info:
            logger.error("%s\n%s", NOT_REGISTERED_ERROR, RHN_DISABLED)
            self._write_channels_file({})
            return
        try:
            svrChannels = up2date_client.rhnChannel.getChannelDetails(
                timeout=self.conf.timeout)
        except up2dateErrors.CommunicationError as e:
            logger.error("%s\n%s\n%s", COMMUNICATION_ERROR, RHN_DISABLED, e)
            return
        except up2dateErrors.NoChannelsError:
            logger.error("%s\n%s", NOT_SUBSCRIBED_ERROR, CHANNELS_DISABLED)
            self._write_channels_file({})
            return
        except up2dateErrors.NoSystemIdError:
            logger.error("%s %s\n%s\n%s", NOT_SUBSCRIBED_ERROR,
                         NO_SYSTEM_ID_ERROR, USE_RHNREGISTER, RHN_DISABLED)
            return
        self.connected_to_spacewalk = True
        logger.info(UPDATES_FROM_SPACEWALK)
        for channel in svrChannels:
            if channel['version']:
                enabled_channels[channel['label']] = dict(channel.items())
        self._write_channels_file(enabled_channels)
    repos = self.base.repos
    for (channel_id, channel_dict) in enabled_channels.items():
        # remember the cached metadata version so unchanged channels can
        # skip refreshing
        cached_channel = cached_channels.get(channel_id)
        cached_version = None
        if cached_channel:
            cached_version = cached_channel.get('version')
        # per-channel sections in the config file override plugin defaults
        conf = copy(self.conf)
        if channel_id in self.parser.sections():
            options = self.parser.items(channel_id)
            for (key, value) in options:
                setattr(conf, key, value)
        repo = SpacewalkRepo(channel_dict, {
            'cachedir': self.base.conf.cachedir,
            'proxy': proxy_url,
            'timeout': conf.timeout,
            'sslcacert': sslcacert,
            'force_http': force_http,
            'cached_version': cached_version,
            'login_info': login_info,
            'gpgcheck': conf.gpgcheck,
            'enabled': conf.enabled,
        })
        repos.add(repo)
    # DEBUG
    logger.debug(enabled_channels)