    def run(self):
        """Execute the util action here."""

        if self.opts.source:
            pkgs = self._get_pkg_objs_source(self.opts.packages)
        elif self.opts.debuginfo:
            pkgs = self._get_pkg_objs_debuginfo(self.opts.packages)
        else:
            pkgs = self._get_pkg_objs_rpms(self.opts.packages)

        # If user asked for just urls then print them and we're done
        if self.opts.url:
            for pkg in pkgs:
                # command line repo packages do not have .remote_location
                if pkg.repoid != hawkey.CMDLINE_REPO_NAME:
                    url = pkg.remote_location(schemes=self.opts.urlprotocols)
                    if url:
                        print(url)
                    else:
                        msg = _("Failed to get mirror for package: %s") % pkg.name
                        if self.base.conf.strict:
                            raise dnf.exceptions.Error(msg)
                        logger.warning(msg)
            return
        else:
            self._do_downloads(pkgs)  # download rpms
Example #2
    def _src_deps(self, src_fn):
        fd = os.open(src_fn, os.O_RDONLY)
        try:
            h = self._rpm_ts.hdrFromFdno(fd)
        except rpm.error as e:
            if str(e) == 'error reading package header':
                e = _("Failed to open: '%s', not a valid source rpm file."
                      ) % src_fn
            os.close(fd)
            raise dnf.exceptions.Error(e)
        os.close(fd)
        ds = h.dsFromHeader('requirename')
        done = True
        for dep in ds:
            reldep_str = self._rpm_dep2reldep_str(dep)
            if reldep_str.startswith('rpmlib('):
                continue
            done &= self._install(reldep_str)

        if not done:
            err = _("Not all dependencies satisfied")
            raise dnf.exceptions.Error(err)

        if self.opts.define:
            logger.warning(
                _("Warning: -D or --define arguments have no meaning "
                  "for source rpm packages."))
Example #3
    def configure(self):
        # setup sack and populate it with enabled repos
        demands = self.cli.demands
        demands.available_repos = True

        # if no argument was passed then error
        if (not (self.opts.add_repo != [] or self.opts.save or self.opts.dump
                 or self.opts.dump_variables or self.opts.set_disabled
                 or self.opts.set_enabled)):
            self.cli.optparser.error(
                _("one of the following arguments is required: {}").format(
                    ' '.join([
                        "--save", "--add-repo", "--dump", "--dump-variables",
                        "--set-enabled", "--enable", "--set-disabled",
                        "--disable"
                    ])))

        # warn with hint if --enablerepo or --disablerepo argument was passed
        if self.opts.repos_ed != []:
            logger.warning(
                _("Warning: --enablerepo/--disablerepo arguments have no meaning"
                  "with config manager. Use --set-enabled/--set-disabled instead."
                  ))

        if (self.opts.save or self.opts.set_enabled or self.opts.set_disabled
                or self.opts.add_repo):
            demands.root_user = True
Example #4
    def migrate_groups(self):
        yum_exec = "/usr/bin/yum-deprecated"
        if not os.path.exists(yum_exec):
            yum_exec = "/usr/bin/yum"
        logger.info(_("Migrating groups data..."))

        try:
            installed = self.get_yum_installed_groups(yum_exec)
        except subprocess.CalledProcessError:
            logger.warning(_("Execution of Yum failed. "
                             "Could not retrieve installed groups."))
            return
        if not installed:
            logger.info(_("No groups to migrate from Yum"))
            return

        # mark installed groups in dnf
        group_cmd = dnf.cli.commands.group.GroupCommand(self.cli)
        group_cmd._grp_setup()
        for group in installed:
            try:
                group_cmd._mark_install([group])
            except dnf.exceptions.CompsError as e:
                # skips not found groups, i.e. after fedup
                # when the group name changes / disappears in new distro
                logger.warning("%s, %s", dnf.i18n.ucd(e)[:-1], _("skipping."))
Example #5
    def _install(self, reldep_str):
        # Try to find something by provides
        sltr = dnf.selector.Selector(self.base.sack)
        sltr.set(provides=reldep_str)
        found = sltr.matches()
        if not found and reldep_str.startswith("/"):
            # Nothing matches by provides and since it's file, try by files
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(file=reldep_str)
            found = sltr.matches()

        if not found and not reldep_str.startswith("("):
            # No provides, no files
            # Richdeps can have no matches but it could be correct (solver must decide later)
            msg = _("No matching package to install: '%s'")
            logger.warning(msg, reldep_str)
            return self.opts.skip_unavailable is True

        if found:
            already_inst = self.base._sltr_matches_installed(sltr)
            if already_inst:
                for package in already_inst:
                    dnf.base._msg_installed(package)
        self.base._goal.install(select=sltr, optional=False)
        return True
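For context, a minimal sketch of the Selector lookup this helper builds on, assuming dnf's Python API is importable and system repos are configured; "bash" is only an illustrative provide string:

import dnf
import dnf.selector

base = dnf.Base()
base.read_all_repos()
base.fill_sack()

# Find available packages by what they provide, as _install() does first.
sltr = dnf.selector.Selector(base.sack)
sltr.set(provides="bash")
for pkg in sltr.matches():
    print(pkg)
base.close()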
Example #6
    def _download_packages(self, to_download):
        pkg_list = []
        for pkg in to_download:
            pkg_spec = '{name}-{evr}.{arch}'.format(
                name=pkg.name,
                evr=pkg.evr,
                arch=pkg.arch,
            )
            subj = dnf.subject.Subject(pkg_spec)

            q = subj.get_best_query(self.base.sack).available().latest()
            sources = list(q)
            if not sources:
                logger.warning(
                    _("package {0} not available in repos, trying local cache".
                      format(pkg_spec)))
                continue

            if len(sources) > 1:
                logger.warning(_("package %s is in multiple repositories"),
                               pkg_spec)

            pkg_list.extend(sources[:1])

        self.base.download_packages(pkg_list)
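A minimal sketch of the Subject-based lookup used above, under the same assumptions (dnf's Python API available, repos configured); the NEVRA spec is hypothetical:

import dnf
import dnf.subject

base = dnf.Base()
base.read_all_repos()
base.fill_sack()

# Resolve a "name-evr.arch" spec to the newest available package(s).
subj = dnf.subject.Subject("bash-5.2.26-1.fc40.x86_64")
for pkg in subj.get_best_query(base.sack).available().latest():
    print(pkg, pkg.reponame)
base.close()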
Example #7
    def run(self):
        """Execute the util action here."""

        if self.opts.source:
            pkgs = self._get_pkg_objs_source(self.opts.packages)
        elif self.opts.debuginfo:
            pkgs = self._get_pkg_objs_debuginfo(self.opts.packages)
        else:
            pkgs = self._get_pkg_objs_rpms(self.opts.packages)

        # If user asked for just urls then print them and we're done
        if self.opts.url:
            for pkg in pkgs:
                # command line repo packages do not have .remote_location
                if pkg.repoid != hawkey.CMDLINE_REPO_NAME:
                    url = pkg.remote_location(schemes=self.opts.urlprotocols)
                    if url:
                        print(url)
                    else:
                        msg = _(
                            "Failed to get mirror for package: %s") % pkg.name
                        if self.base.conf.strict:
                            raise dnf.exceptions.Error(msg)
                        logger.warning(msg)
            return
        else:
            self._do_downloads(pkgs)  # download rpms
Example #8
    def configure(self):
        # setup sack and populate it with enabled repos
        demands = self.cli.demands
        demands.available_repos = True

        # if no argument was passed then error
        if (not (self.opts.add_repo != [] or self.opts.save or self.opts.dump
                 or self.opts.dump_variables or self.opts.set_disabled
                 or self.opts.set_enabled)):
            self.cli.optparser.error(
                _("one of the following arguments is required: {}").format(
                    ' '.join([
                        "--save", "--add-repo", "--dump", "--dump-variables",
                        "--set-enabled", "--enable", "--set-disabled",
                        "--disable"
                    ])))

        # warn with hint if --enablerepo or --disablerepo argument was passed
        if self.opts.repos_ed != []:
            logger.warning(
                _("Warning: --enablerepo/--disablerepo arguments have no meaning"
                  "with config manager. Use --set-enabled/--set-disabled instead."
                  ))

        if (self.opts.save or self.opts.set_enabled or self.opts.set_disabled
                or self.opts.add_repo):
            demands.root_user = True

        # sanitize commas https://bugzilla.redhat.com/show_bug.cgi?id=1830530
        temp_list = [x.split(',') for x in self.opts.crepo if x != ',']
        # flatten sublists
        self.opts.crepo = [
            item for sublist in temp_list for item in sublist if item != ''
        ]
Example #9
def get_options_from_dir(filepath, base):
    """
    Provide filepath as string if single dir or list of strings
    Return set of package names contained in files under filepath
    """

    if not os.path.exists(filepath):
        return set()
    options = set()
    for file in os.listdir(filepath):
        if os.path.isdir(file) or not file.endswith('.conf'):
            continue

        with open(os.path.join(filepath, file)) as fp:
            for line in fp:
                options.add((line.rstrip(), file))

    packages = set()
    for pkg in base.sack.query().installed().filter(
            name={x[0]
                  for x in options}):
        packages.add(pkg.name)
    for name, file in {x for x in options if x[0] not in packages}:
        logger.warning(
            _('No installed package found for package name "{pkg}" '
              'specified in needs-restarting file "{file}".'.format(
                  pkg=name, file=file)))
    return packages
Example #10
 def _install(self, reldep_str):
     try:
         self.base.install(reldep_str)
     except dnf.exceptions.MarkingError:
         msg = _("No matching package to install: '%s'")
         logger.warning(msg, reldep_str)
         return False
     return True
Example #11
 def _install(self, reldep_str):
     try:
         self.base.install(reldep_str)
     except dnf.exceptions.MarkingError:
         msg = _("No matching package to install: '%s'")
         logger.warning(msg, reldep_str)
         return False
     return True
Example #12
 def print_urls(self, pkglist):
     for pkg in pkglist:
         url = pkg.remote_location()
         if url:
             print(url)
         else:
             msg = _("Failed to get mirror for package: %s") % pkg.name
             logger.warning(msg)
Example #13
 def operate_pkg(self, operate, pkgname):
     try:
         eval('self.base.%s("%s")' % (operate, pkgname))
     except dnf.exceptions.MarkingError:
         msg = _("No matching package to install: '%s'")
         logger.warning(msg, pkgname)
         return False
     return True
Example #14
 def operate_pkg(self, operate, pkgname):
     try:
         eval('self.base.%s("%s")' % (operate, pkgname))
     except dnf.exceptions.MarkingError:
         msg = _("No matching package to install: '%s'")
         logger.warning(msg, pkgname)
         return False
     return True
Example #15
	def run(self):
		if self.opts.swidtagscmd[0] in ( "purge", "sync", "regen" ):
			self.plugin.purge_generated_dir()
			self.plugin.purge_generated_symlink()
		else:
			print("dnf swidtags [sync | purge]")

		if self.opts.swidtagscmd[0] in ( "sync", "regen" ):
			ts = rpm.transaction.initReadOnlyTransaction(root=self.base.conf.installroot)
			pkgs = []
			for p in ts.dbMatch():
				# Filter out imported GPG keys
				if p["arch"]:
					pkgs.append(p)

			dirs = {}
			for r in self.base.repos.iter_enabled():
				if not hasattr(r, "get_metadata_path"):
					continue
				file = r.get_metadata_path(self.plugin.METADATA_TYPE)
				if not file or file == "":
					continue
				s = repodata.Swidtags(None, file)
				tags = s.tags_for_rpm_packages(pkgs)

				remaining_pkgs = []
				for p in pkgs:
					if p not in tags:
						remaining_pkgs.append(p)
						continue
					found = False
					for t in tags[p]:
						logger.debug("Retrieved SWID tag from repodata for %s: %s", get_nevra(p), t.get_tagid())
						x = t.save_to_directory(self.plugin.dir_downloaded)
						dirs[x[0]] = True
						found = True
					if not found:
						remaining_pkgs.append(p)

				pkgs = remaining_pkgs

			for d in dirs:
				self.plugin.create_swidtags_d_symlink(path.basename(d))

			if len(pkgs) > 0:
				run_ret = self.plugin.run_rpm2swidtag_for([ get_nevra(p) for p in pkgs ])
				if run_ret == 0:
					pkgs_missing = {}
					for p in pkgs:
						pkgs_missing[get_checksum(p)] = p
					for f in iglob(path.join(self.plugin.dir_generated, "*-rpm-*.swidtag")):
						m = re.search(r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\.swidtag$', f)
						if m and m.group(1) in pkgs_missing:
							del pkgs_missing[m.group(1)]
					for p in pkgs_missing.values():
						logger.warning("The SWID tag for rpm %s should have been generated but could not be found", get_nevra(p))
				if run_ret == -2:
					logger.warning("The rpm2swidtag_command not configured for the %s plugin.\nSWID tags not generated locally for %d packages.", NAME, len(pkgs))
Example #16
 def config(self):
     parser = self.read_config(self.base.conf)
     try:
         self._autoupdate = (
             parser.has_section('main')
             and parser.has_option('main', KPATCH_UPDATE_OPT)
             and parser.getboolean('main', KPATCH_UPDATE_OPT))
     except Exception as e:
         logger.warning(_("Parsing file failed: {}").format(str(e)))
Example #17
	def get_nevra_checksum(self, nevra, verbose=True):
		if not self.ts:
			self.ts = rpm.transaction.initReadOnlyTransaction(root=self.base.conf.installroot)
		rpms = self.ts.dbMatch(2, str(nevra))
		if len(rpms) > 1:
			if verbose:
				logger.warning("Multiple rpms %s found installed for package %s.", str(rpms), str(nevra))
			return None
		for r in rpms:
			checksum = get_checksum(r)
			if checksum:
				return checksum
			if verbose:
				logger.warning("No checksum found for rpm %s.", str(nevra))
		return None
Example #18
def list_opened_files(uid):
    for (pid, smaps) in list_smaps():
        try:
            if uid is not None and uid != owner_uid(smaps):
                continue
            with open(smaps, 'r') as smaps_file:
                lines = smaps_file.readlines()
        except EnvironmentError:
            logger.warning("Failed to read PID %d's smaps.", pid)
            continue

        for line in lines:
            ofile = smap2opened_file(pid, line)
            if ofile is not None:
                yield ofile
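The generator above relies on two helpers, list_smaps() and owner_uid(), that are not shown here. One possible shape for them (an assumption for illustration, not necessarily the plugin's actual code):

import glob
import os

def list_smaps():
    # Yield (pid, path) pairs for every process that exposes an smaps file.
    for smaps in glob.glob('/proc/[0-9]*/smaps'):
        yield int(smaps.split('/')[2]), smaps

def owner_uid(path):
    # UID of the user owning the process's smaps file.
    return os.stat(path).st_uid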
Example #19
def list_opened_files(uid):
    for (pid, smaps) in list_smaps():
        try:
            if uid is not None and uid != owner_uid(smaps):
                continue
            with open(smaps, 'r') as smaps_file:
                lines = smaps_file.readlines()
        except EnvironmentError:
            logger.warning("Can not to read PID's %d smaps.", pid)
            continue

        for line in lines:
            ofile = smap2opened_file(pid, line)
            if ofile is not None:
                yield ofile
Example #20
	def purge_generated_dir(self):
		if not path.isdir(self.dir_generated):
			return
		count = 0
		for f in listdir(self.dir_generated):
			try:
				unlink(path.join(self.dir_generated, f))
				count += 1
			except OSError as e:
				logger.warning("Failed to remove [%s]: %s", f, e)
		try:
			rmdir(self.dir_generated)
		except OSError as e:
			logger.warning("Failed to remove [%s]: %s", self.dir_generated, e)
		if count > 0:
			logger.debug("Removed %d generated files from %s", count, self.dir_generated)
Example #21
    def migrate_yumdb(self):
        """Migrate YUMDB data."""
        attribute2mandatory = {
            "changed_by": False, "checksum_data": True, "checksum_type": True,
            "command_line": False, "from_repo": True,
            "from_repo_revision": False, "from_repo_timestamp": False,
            "installed_by": False, "reason": True, "releasever": True}
        migrated = skipped = 0
        logger.info(_("Migrating YUMDB data..."))
        try:
            with contextlib.closing(_YumBase()) as yumbase:
                for pkgtup, pkgid in yumbase.iter_yumdb(logger.warning):
                    nevra = "{0[0]}-{0[3]}-{0[4]}.{0[1]}".format(pkgtup)
                    dnfdata = self.base.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    if next(iter(dnfdata), None) is not None:
                        logger.warning("%s found in DNFDB; skipping", nevra)
                        skipped += 1
                        continue

                    yumdata = yumbase.rpmdb.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    for attribute, mandat in attribute2mandatory.items():
                        try:
                            value = getattr(yumdata, attribute)
                        except AttributeError:
                            lvl = logging.WARNING if mandat else logging.DEBUG
                            msg = _("%s of %s not found")
                            logger.log(lvl, msg, attribute, nevra)
                            continue
                        if isinstance(value, bytes):
                            value = value.decode("utf-8", "replace")
                            if '\ufffd' in value:
                                msg = _(
                                    "replacing unknown characters in %s of %s")
                                logger.warning(msg, attribute, nevra)
                        try:
                            setattr(dnfdata, attribute, value)
                        except (OSError, IOError):
                            msg = _("DNFDB access denied")
                            raise dnf.exceptions.Error(msg)
                        logger.debug(_("%s of %s migrated"), attribute, nevra)
                    migrated += 1
        finally:
            logger.info(
                _("%d YUMDB records found, %d migrated, %d skipped/preserved"),
                migrated + skipped, migrated, skipped)
Example #22
    def _get_packages_from_modules(self, module_spec):
        """Gets packages from modules matching module spec
        1. From module artifacts
        2. From module profiles"""
        result_query = self.base.sack.query().filterm(empty=True)
        module_base = dnf.module.module_base.ModuleBase(self.base)
        module_list, nsvcap = module_base.get_modules(module_spec)
        if self.opts.newest_only:
            module_list = self.base._moduleContainer.getLatestModules(
                module_list, False)
        for module in module_list:
            for artifact in module.getArtifacts():
                query = self.base.sack.query(
                    flags=hawkey.IGNORE_EXCLUDES).filterm(
                        nevra_strict=artifact)
                if query:
                    result_query = result_query.union(query)
                else:
                    msg = _("No match for artifact '{0}' from module '{1}'"
                            ).format(artifact, module.getFullIdentifier())
                    logger.warning(msg)
            if nsvcap.profile:
                profiles_set = module.getProfiles(nsvcap.profile)
            else:
                profiles_set = module.getProfiles()
            if profiles_set:
                for profile in profiles_set:
                    for pkg_name in profile.getContent():
                        query = self.base.sack.query(
                            flags=hawkey.IGNORE_EXCLUDES).filterm(
                                name=pkg_name)
                        # Prefer to add modular providers selected by argument
                        if result_query.intersection(query):
                            continue
                        # Add all packages with the same name as profile described
                        elif query:
                            result_query = result_query.union(query)
                        else:
                            msg = _("No match for package name '{0}' in profile {1} from module {2}")\
                                .format(pkg_name, profile.getName(), module.getFullIdentifier())
                            logger.warning(msg)
        if not module_list:
            msg = _("No mach for argument '{}'").format(module_spec)
            raise dnf.exceptions.Error(msg)

        return result_query
Example #23
 def _get_packages_with_deps(self, pkg_specs, source=False):
     """Get packages matching pkg_specs and the deps."""
     pkgs = self._get_packages(pkg_specs)
     pkg_set = set(pkgs)
     for pkg in pkgs:
         goal = hawkey.Goal(self.base.sack)
         goal.install(pkg)
         rc = goal.run()
         if rc:
             pkg_set.update(goal.list_installs())
             pkg_set.update(goal.list_upgrades())
         else:
             msg = [_('Error in resolve of packages:')]
             logger.warning("\n    ".join(msg + [str(pkg) for pkg in pkgs]))
             logger.warning(
                 dnf.util._format_resolve_problems(goal.problem_rules()))
             return []
     return pkg_set
Example #24
 def _get_packages_with_deps(self, pkg_specs, source=False):
     """Get packages matching pkg_specs and the deps."""
     pkgs = self._get_packages(pkg_specs)
     goal = hawkey.Goal(self.base.sack)
     for pkg in pkgs:
         goal.install(pkg)
     rc = goal.run()
     if rc:
         new_pkgs = goal.list_installs() + goal.list_upgrades()
         for pkg in pkgs:
             if pkg not in new_pkgs:
                 new_pkgs += [pkg]
         return new_pkgs
     else:
         msg = [_('Error in resolve of packages:')]
         logger.warning("\n    ".join(msg + [str(pkg) for pkg in pkgs]))
         logger.warning(dnf.util._format_resolve_problems(goal.problem_rules()))
         return []
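A minimal sketch of the hawkey.Goal resolution step both variants rely on, assuming a populated dnf Base; "curl" is only an illustrative package name:

import dnf
import hawkey

base = dnf.Base()
base.read_all_repos()
base.fill_sack()

pkgs = base.sack.query().available().filter(name="curl").latest().run()
if pkgs:
    goal = hawkey.Goal(base.sack)
    goal.install(pkgs[0])
    if goal.run():
        for p in goal.list_installs():
            print("would install:", p)
    else:
        print(goal.problem_rules())
base.close()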
Example #25
 def run(self):
     self.base.conf.keepcache = True
     for repo in self.base.repos.iter_enabled():
         if self.opts.remote_time:
             repo._repo.setPreserveRemoteTime(True)
         if self.opts.download_metadata:
             if self.opts.urls:
                 for md_type, md_location in repo._repo.getMetadataLocations(
                 ):
                     url = repo.remote_location(md_location)
                     if url:
                         print(url)
                     else:
                         msg = _("Failed to get mirror for metadata: %s"
                                 ) % md_type
                         logger.warning(msg)
             else:
                 self.download_metadata(repo)
         if self.opts.downloadcomps:
             if self.opts.urls:
                 mdl = dict(repo._repo.getMetadataLocations())
                 group_locations = [
                     mdl[md_type]
                     for md_type in ('group', 'group_gz', 'group_gz_zck')
                     if md_type in mdl
                 ]
                 if group_locations:
                     for group_location in group_locations:
                         url = repo.remote_location(group_location)
                         if url:
                             print(url)
                             break
                     else:
                         msg = _("Failed to get mirror for the group file.")
                         logger.warning(msg)
             else:
                 self.getcomps(repo)
         pkglist = self.get_pkglist(repo)
         if self.opts.urls:
             self.print_urls(pkglist)
         else:
             self.download_packages(pkglist)
         if self.opts.delete:
             self.delete_old_local_packages(repo, pkglist)
Example #26
    def _get_providers_of_requires(self, to_test, done=None, req_dict=None):
        done = done if done else to_test
        # req_dict = {}  {req : set(pkgs)}
        if req_dict is None:
            req_dict = {}
        test_requires = []
        for pkg in to_test:
            for require in pkg.requires:
                if require not in req_dict:
                    test_requires.append(require)
                req_dict.setdefault(require, set()).add(pkg)

        if self.opts.newest_only:
            #  Prepare cache with all packages related affected by modular filtering
            names = set()
            for module in self.base._moduleContainer.getModulePackages():
                for artifact in module.getArtifacts():
                    name, __, __ = artifact.rsplit("-", 2)
                    names.add(name)
            modular_related = self.base.sack.query(
                flags=hawkey.IGNORE_EXCLUDES).filterm(provides=names)

        requires = self.base.sack.query().filterm(empty=True)
        for require in test_requires:
            q = self.base.sack.query(flags=hawkey.IGNORE_EXCLUDES).filterm(
                provides=require)

            if not q:
                #  TODO(jmracek) Shall we end with an error or with RC 1?
                logger.warning(
                    (_("Unable to satisfy require {}").format(require)))
            else:
                if self.opts.newest_only:
                    if not modular_related.intersection(q):
                        q.filterm(latest_per_arch_by_priority=1)
                requires = requires.union(q.difference(done))
        done = done.union(requires)
        if requires:
            done = self._get_providers_of_requires(requires,
                                                   done=done,
                                                   req_dict=req_dict)

        return done
Example #27
    def _update_plugin_cfg(self, value):
        cfg_file = _get_plugin_cfg_file(self.base.conf)
        if cfg_file is None:
            logger.warning("Couldn't find configuration file")
            return
        try:
            parser = configparser.ConfigParser()
            parser.read(cfg_file)
        except Exception as e:
            raise dnf.exceptions.Error(
                _("Parsing file failed: {}").format(str(e)))

        if not parser.has_section('main'):
            parser.add_section('main')
        parser.set('main', KPATCH_UPDATE_OPT, str(value))

        try:
            with open(cfg_file, 'w') as cfg_stream:
                parser.write(cfg_stream)
        except Exception as e:
            raise dnf.exceptions.Error(
                _("Failed to update conf file: {}").format(str(e)))
Example #28
    def __init__(self, base, cli):
        super(Spacewalk, self).__init__(base, cli)
        self.base = base
        self.cli = cli
        self.stored_channels_path = os.path.join(self.base.conf.persistdir,
                                                 STORED_CHANNELS_NAME)
        self.connected_to_spacewalk = False
        self.up2date_cfg = {}
        self.conf = copy(self.base.conf)
        self.parser = self.read_config(self.conf)
        if "main" in self.parser.sections():
            options = self.parser.items("main")
            for (key, value) in options:
                self.conf._set_value(key, value, PRIO_PLUGINCONFIG)
        if not dnf.util.am_i_root():
            logger.warning(MUST_BE_ROOT)
            self.conf.enabled = False
        if not self.conf.enabled:
            return
        logger.debug('initialized Spacewalk plugin')

        self.activate_channels()
Example #29
    def __init__(self, base, cli):
        super(Spacewalk, self).__init__(base, cli)
        self.base = base
        self.cli = cli
        self.stored_channels_path = os.path.join(self.base.conf.persistdir,
                                                 STORED_CHANNELS_NAME)
        self.connected_to_spacewalk = False
        self.up2date_cfg = {}
        self.conf = copy(self.base.conf)
        self.parser = self.read_config(self.conf)
        if "main" in self.parser.sections():
            options = self.parser.items("main")
            for (key, value) in options:
                self.conf._set_value(key, value, PRIO_PLUGINCONFIG)
        if not dnf.util.am_i_root():
            logger.warning(MUST_BE_ROOT)
            self.conf.enabled = False
        if not self.conf.enabled:
            return
        logger.debug('initialized Spacewalk plugin')

        self.activate_channels()
Example #30
    def _install(self, reldep_str):
        # Try to find something by provides
        sltr = dnf.selector.Selector(self.base.sack)
        sltr.set(provides=reldep_str)
        found = sltr.matches()
        if not found and reldep_str.startswith("/"):
            # Nothing matches by provides and since it's file, try by files
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(file=reldep_str)
            found = sltr.matches()

        if not found:
            # No provides, no files
            msg = _("No matching package to install: '%s'")
            logger.warning(msg, reldep_str)
            return False

        already_inst = self.base._sltr_matches_installed(sltr)
        if already_inst:
            for package in already_inst:
                dnf.base._msg_installed(package)
        self.base._goal.install(select=sltr, optional=False)
        return True
Example #31
    def _install(self, reldep_str):
        # Try to find something by provides
        sltr = dnf.selector.Selector(self.base.sack)
        sltr.set(provides=reldep_str)
        found = sltr.matches()
        if not found and reldep_str.startswith("/"):
            # Nothing matches by provides and since it's file, try by files
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(file=reldep_str)
            found = sltr.matches()

        if not found:
            # No provides, no files
            msg = _("No matching package to install: '%s'")
            logger.warning(msg, reldep_str)
            return False

        already_inst = self.base._sltr_matches_installed(sltr)
        if already_inst:
            for package in already_inst:
                dnf.base._msg_installed(package)
        self.base._goal.install(select=sltr, optional=False)
        return True
Example #32
    def __init__(self, channel, opts):
        super(SpacewalkRepo, self).__init__(ustr(channel['label']),
                                            opts.get('conf'))
        # dnf stuff
        self.name = ustr(channel['name'])
        self.baseurl = [url + '/GET-REQ/' + self.id for url in channel['url']]
        self.sslcacert = opts.get('sslcacert')
        self.proxy = opts.get('proxy')
        try:
            self.gpgkey = get_gpg_key_urls(channel['gpg_key_url'])
        except InvalidGpgKeyLocation as e:
            logger.warning(GPG_KEY_REJECTED, dnf.i18n.ucd(e))
            self.gpgkey = []
        if channel['version'] != opts.get('cached_version'):
            self.metadata_expire = 1

        # spacewalk stuff
        self.login_info = opts.get('login_info')
        self.keepalive = 0
        self.bandwidth = 0
        self.retries = 1
        self.throttle = 0
        self.timeout = opts.get('timeout')
        self.gpgcheck = opts.get('gpgcheck')
        self.force_http = opts.get('force_http')

        if opts.get('enabled'):
            self.enable()
        else:
            self.disable()

        if hasattr(self, 'set_http_headers'):
            # dnf > 4.0.9  on RHEL 8, Fedora 29/30
            http_headers = self.create_http_headers()
            if http_headers:
                self.set_http_headers(http_headers)
Example #33
    def __init__(self, channel, opts):
        super(SpacewalkRepo, self).__init__(ustr(channel['label']),
                                            opts.get('conf'))
        # dnf stuff
        self.name = ustr(channel['name'])
        self.baseurl = [ url + '/GET-REQ/' + self.id for url in channel['url']]
        self.sslcacert = opts.get('sslcacert')
        self.proxy = opts.get('proxy')
        try:
            self.gpgkey = get_gpg_key_urls(channel['gpg_key_url'])
        except InvalidGpgKeyLocation as e:
            logger.warning(GPG_KEY_REJECTED, dnf.i18n.ucd(e))
            self.gpgkey = []
        if channel['version'] != opts.get('cached_version'):
            self.metadata_expire = 1

        # spacewalk stuff
        self.login_info = opts.get('login_info')
        self.keepalive = 0
        self.bandwidth = 0
        self.retries = 1
        self.throttle = 0
        self.timeout = opts.get('timeout')
        self.gpgcheck = opts.get('gpgcheck')
        self.force_http = opts.get('force_http')

        if opts.get('enabled'):
            self.enable()
        else:
            self.disable()

        if hasattr(self, 'set_http_headers'):
            # dnf > 4.0.9  on RHEL 8, Fedora 29/30
            http_headers = self.create_http_headers()
            if http_headers:
                self.set_http_headers(http_headers)
Example #34
    def run(self):
        if self.opts.new and self.opts.old:
            raise dnf.exceptions.Error(
                _("Pass either --old or --new, not both!"))

        rpm_list = []
        rpm_list = self._get_file_list(self.opts.path, ".rpm")
        verfile = {}
        pkgdict = {}

        keepnum = int(self.opts.keep) * (-1)  # the number of items to keep

        if len(rpm_list) == 0:
            raise dnf.exceptions.Error(_("No files to process"))

        try:
            self.base.add_remote_rpms(rpm_list,
                                      progress=self.base.output.progress)
        except IOError:
            logger.warning(_("Could not open {}").format(', '.join(rpm_list)))

        packages = [x for x in self.base.sack.query().available()]
        packages.sort()
        for pkg in packages:
            na = (pkg.name, pkg.arch)
            if na in pkgdict:
                pkgdict[na].append(pkg)
            else:
                pkgdict[na] = [pkg]

            nevra = self._package_to_nevra(pkg)
            if nevra in verfile:
                verfile[nevra].append(self._package_to_path(pkg))
            else:
                verfile[nevra] = [self._package_to_path(pkg)]

        outputpackages = []

        # if new
        if not self.opts.old:
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                if len(evrlist) < abs(keepnum):
                    newevrs = evrlist
                else:
                    newevrs = evrlist[keepnum:]

                for package in newevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

        if self.opts.old:
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                if len(evrlist) < abs(keepnum):
                    continue

                oldevrs = evrlist[:keepnum]
                for package in oldevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

        outputpackages.sort()
        if self.opts.space:
            print(" ".join(outputpackages))
        else:
            for pkg in outputpackages:
                print(pkg)
Example #35
    def edit_group(self, group):
        '''
        Set attributes and package lists for selected group
        '''
        def langlist_to_strdict(lst):
            str_dict = libcomps.StrDict()
            for lang, text in lst:
                str_dict[lang] = text
            return str_dict

        # set group attributes
        if self.opts.name:
            group.name = self.opts.name
        if self.opts.description:
            group.desc = self.opts.description
        if self.opts.display_order:
            group.display_order = self.opts.display_order
        if self.opts.user_visible is not None:
            group.uservisible = self.opts.user_visible
        if self.opts.translated_name:
            group.name_by_lang = langlist_to_strdict(self.opts.translated_name)
        if self.opts.translated_description:
            group.desc_by_lang = langlist_to_strdict(
                self.opts.translated_description)

        # edit packages list
        if self.opts.packages:
            # find packages according to specifications from command line
            packages = set()
            for pkg_spec in self.opts.packages:
                subj = dnf.subject.Subject(pkg_spec)
                q = subj.get_best_query(self.base.sack,
                                        with_nevra=True,
                                        with_provides=False,
                                        with_filenames=False).latest()
                if not q:
                    logger.warning(
                        _("No match for argument: {}").format(pkg_spec))
                    continue
                packages.update(q)
            if self.opts.dependencies:
                # add packages that provide requirements
                requirements = set()
                for pkg in packages:
                    requirements.update(pkg.requires)
                packages.update(
                    self.base.sack.query().filterm(provides=requirements))

            pkg_names = {pkg.name for pkg in packages}

            if self.opts.remove:
                for pkg_name in pkg_names:
                    for pkg in group.packages_match(
                            name=pkg_name, type=libcomps.PACKAGE_TYPE_UNKNOWN):
                        group.packages.remove(pkg)
            else:
                if self.opts.mandatory:
                    pkg_type = libcomps.PACKAGE_TYPE_MANDATORY
                elif self.opts.optional:
                    pkg_type = libcomps.PACKAGE_TYPE_OPTIONAL
                else:
                    pkg_type = libcomps.PACKAGE_TYPE_DEFAULT
                for pkg_name in sorted(pkg_names):
                    if not group.packages_match(name=pkg_name, type=pkg_type):
                        group.packages.append(
                            libcomps.Package(name=pkg_name, type=pkg_type))
Example #36
    def run(self):
        if self.opts.new and self.opts.old:
            raise dnf.exceptions.Error(
                _("Pass either --old or --new, not both!"))

        rpm_list = []
        rpm_list = self._get_file_list(self.opts.path, ".rpm")
        verfile = {}
        pkgdict = {}
        module_dict = {}  # {NameStream: {Version: [modules]}}
        all_modular_artifacts = set()

        keepnum = int(self.opts.keep)  # the number of items to keep

        if len(rpm_list) == 0:
            raise dnf.exceptions.Error(_("No files to process"))

        try:
            this_repo = self.base.repos.add_new_repo("repomanage_repo",
                                                     self.base.conf,
                                                     baseurl=[self.opts.path])
            self.base._add_repo_to_sack(this_repo)
            if dnf.base.WITH_MODULES:
                self.base._setup_modular_excludes()

                # Prepare modules
                module_packages = self.base._moduleContainer.getModulePackages(
                )

                for module_package in module_packages:
                    all_modular_artifacts.update(module_package.getArtifacts())
                    module_dict.setdefault(module_package.getNameStream(),
                                           {}).setdefault(
                                               module_package.getVersionNum(),
                                               []).append(module_package)

        except dnf.exceptions.RepoError:
            self.base.reset(sack=True, repos=True)
            self.base.fill_sack(load_system_repo=False,
                                load_available_repos=False)
            try:
                self.base.add_remote_rpms(rpm_list,
                                          progress=self.base.output.progress)
            except IOError:
                logger.warning(
                    _("Could not open {}").format(', '.join(rpm_list)))

        # Prepare regular packages
        query = self.base.sack.query(
            flags=hawkey.IGNORE_MODULAR_EXCLUDES).available()
        packages = [
            x for x in query.filter(pkg__neq=query.filter(
                nevra_strict=all_modular_artifacts)).available()
        ]
        packages.sort()

        for pkg in packages:
            na = (pkg.name, pkg.arch)
            if na in pkgdict:
                if pkg not in pkgdict[na]:
                    pkgdict[na].append(pkg)
            else:
                pkgdict[na] = [pkg]

            nevra = self._package_to_nevra(pkg)
            if nevra in verfile:
                verfile[nevra].append(self._package_to_path(pkg))
            else:
                verfile[nevra] = [self._package_to_path(pkg)]

        outputpackages = []
        # modular packages
        keepnum_latest_stream_artifacts = set()

        # if new
        if not self.opts.old:
            # regular packages
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                newevrs = evrlist[-keepnum:]

                for package in newevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

            # modular packages
            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys(),
                                                reverse=True)

                new_sorted_stream_versions = sorted_stream_versions[-keepnum:]

                for i in new_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        keepnum_latest_stream_artifacts.update(
                            set(stream.getArtifacts()))

        if self.opts.old:
            # regular packages
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                oldevrs = evrlist[:-keepnum]

                for package in oldevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

            # modular packages
            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys(),
                                                reverse=True)

                old_sorted_stream_versions = sorted_stream_versions[:-keepnum]

                for i in old_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        keepnum_latest_stream_artifacts.update(
                            set(stream.getArtifacts()))

        modular_packages = [
            self._package_to_path(x)
            for x in query.filter(pkg__eq=query.filter(
                nevra_strict=keepnum_latest_stream_artifacts)).available()
        ]
        outputpackages = outputpackages + modular_packages
        outputpackages.sort()
        if self.opts.space:
            print(" ".join(outputpackages))
        else:
            for pkg in outputpackages:
                print(pkg)
Example #37
    def run(self):
        if self.opts.new and self.opts.old:
            raise dnf.exceptions.Error(
                _("Pass either --old or --new, not both!"))
        if self.opts.new and self.opts.oldonly:
            raise dnf.exceptions.Error(
                _("Pass either --oldonly or --new, not both!"))
        if self.opts.old and self.opts.oldonly:
            raise dnf.exceptions.Error(
                _("Pass either --old or --oldonly, not both!"))
        if not self.opts.old and not self.opts.oldonly:
            self.opts.new = True

        verfile = {}
        pkgdict = {}
        module_dict = {}  # {NameStream: {Version: [modules]}}
        all_modular_artifacts = set()

        keepnum = int(self.opts.keep)  # the number of items to keep

        try:
            REPOMANAGE_REPOID = "repomanage_repo"
            repo_conf = self.base.repos.add_new_repo(REPOMANAGE_REPOID,
                                                     self.base.conf,
                                                     baseurl=[self.opts.path])
            # Always expire the repo, otherwise repomanage could use cached metadata and give identical results
            # for multiple runs even if the actual repo changed in the meantime
            repo_conf._repo.expire()
            self.base._add_repo_to_sack(repo_conf)
            if dnf.base.WITH_MODULES:
                self.base._setup_modular_excludes()

                # Prepare modules
                module_packages = self.base._moduleContainer.getModulePackages(
                )

                for module_package in module_packages:
                    # Even though we load only REPOMANAGE_REPOID other modules can be loaded from system
                    # failsafe data automatically, we don't want them affecting repomanage results so ONLY
                    # use modules from REPOMANAGE_REPOID.
                    if module_package.getRepoID() == REPOMANAGE_REPOID:
                        all_modular_artifacts.update(
                            module_package.getArtifacts())
                        module_dict.setdefault(
                            module_package.getNameStream(),
                            {}).setdefault(module_package.getVersionNum(),
                                           []).append(module_package)

        except dnf.exceptions.RepoError:
            rpm_list = []
            rpm_list = self._get_file_list(self.opts.path, ".rpm")
            if len(rpm_list) == 0:
                raise dnf.exceptions.Error(_("No files to process"))

            self.base.reset(sack=True, repos=True)
            self.base.fill_sack(load_system_repo=False,
                                load_available_repos=False)
            try:
                self.base.add_remote_rpms(rpm_list,
                                          progress=self.base.output.progress)
            except IOError:
                logger.warning(
                    _("Could not open {}").format(', '.join(rpm_list)))

        # Prepare regular packages
        query = self.base.sack.query(
            flags=hawkey.IGNORE_MODULAR_EXCLUDES).available()
        packages = [
            x for x in query.filter(pkg__neq=query.filter(
                nevra_strict=all_modular_artifacts)).available()
        ]
        packages.sort()

        for pkg in packages:
            na = (pkg.name, pkg.arch)
            if na in pkgdict:
                if pkg not in pkgdict[na]:
                    pkgdict[na].append(pkg)
            else:
                pkgdict[na] = [pkg]

            nevra = self._package_to_nevra(pkg)
            if nevra in verfile:
                verfile[nevra].append(self._package_to_path(pkg))
            else:
                verfile[nevra] = [self._package_to_path(pkg)]

        outputpackages = []
        # modular packages
        keepnum_latest_stream_artifacts = set()

        if self.opts.new:
            # regular packages
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                newevrs = evrlist[-keepnum:]

                for package in newevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

            # modular packages
            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys())

                new_sorted_stream_versions = sorted_stream_versions[-keepnum:]

                for i in new_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        keepnum_latest_stream_artifacts.update(
                            set(stream.getArtifacts()))

        if self.opts.old:
            # regular packages
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                oldevrs = evrlist[:-keepnum]

                for package in oldevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

            # modular packages
            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys())

                old_sorted_stream_versions = sorted_stream_versions[:-keepnum]

                for i in old_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        keepnum_latest_stream_artifacts.update(
                            set(stream.getArtifacts()))

        if self.opts.oldonly:
            # regular packages
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                oldevrs = evrlist[:-keepnum]

                for package in oldevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

            # modular packages
            keepnum_newer_stream_artifacts = set()

            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys())

                new_sorted_stream_versions = sorted_stream_versions[-keepnum:]

                for i in new_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        keepnum_newer_stream_artifacts.update(
                            set(stream.getArtifacts()))

            for streams_by_version in module_dict.values():
                sorted_stream_versions = sorted(streams_by_version.keys())

                old_sorted_stream_versions = sorted_stream_versions[:-keepnum]

                for i in old_sorted_stream_versions:
                    for stream in streams_by_version[i]:
                        for artifact in stream.getArtifacts():
                            if artifact not in keepnum_newer_stream_artifacts:
                                keepnum_latest_stream_artifacts.add(artifact)

        modular_packages = [
            self._package_to_path(x)
            for x in query.filter(pkg__eq=query.filter(
                nevra_strict=keepnum_latest_stream_artifacts)).available()
        ]
        outputpackages = outputpackages + modular_packages
        outputpackages.sort()
        if self.opts.space:
            print(" ".join(outputpackages))
        else:
            for pkg in outputpackages:
                print(pkg)
Example #38
 def run(self):
     self.base.conf.keepcache = True
     gpgcheck_ok = True
     for repo in self.base.repos.iter_enabled():
         if self.opts.remote_time:
             repo._repo.setPreserveRemoteTime(True)
         if self.opts.download_metadata:
             if self.opts.urls:
                 for md_type, md_location in repo._repo.getMetadataLocations(
                 ):
                     url = repo.remote_location(md_location)
                     if url:
                         print(url)
                     else:
                         msg = _("Failed to get mirror for metadata: %s"
                                 ) % md_type
                         logger.warning(msg)
             else:
                 self.download_metadata(repo)
         if self.opts.downloadcomps:
             if self.opts.urls:
                 mdl = dict(repo._repo.getMetadataLocations())
                 group_locations = [
                     mdl[md_type]
                     for md_type in ('group', 'group_gz', 'group_gz_zck')
                     if md_type in mdl
                 ]
                 if group_locations:
                     for group_location in group_locations:
                         url = repo.remote_location(group_location)
                         if url:
                             print(url)
                             break
                     else:
                         msg = _("Failed to get mirror for the group file.")
                         logger.warning(msg)
             else:
                 self.getcomps(repo)
         pkglist = self.get_pkglist(repo)
         if self.opts.urls:
             self.print_urls(pkglist)
         else:
             self.download_packages(pkglist)
             if self.opts.gpgcheck:
                 for pkg in pkglist:
                     local_path = self.pkg_download_path(pkg)
                     # base.package_signature_check uses pkg.localPkg() to determine
                     # the location of the package rpm file on the disk.
                     # Set it to the correct download path.
                     pkg.localPkg = types.MethodType(
                         lambda s, local_path=local_path: local_path, pkg)
                     result, error = self.base.package_signature_check(pkg)
                     if result != 0:
                         logger.warning(
                             _("Removing {}: {}").format(
                                 os.path.basename(local_path), error))
                         os.unlink(local_path)
                         gpgcheck_ok = False
         if self.opts.delete:
             self.delete_old_local_packages(repo, pkglist)
     if not gpgcheck_ok:
         raise dnf.exceptions.Error(_("GPG signature check failed."))
Example #39
    def run(self):
        if self.opts.new and self.opts.old:
            raise dnf.exceptions.Error(_("Pass either --old or --new, not both!"))

        rpm_list = []
        rpm_list = self._get_file_list(self.opts.path, ".rpm")
        verfile = {}
        pkgdict = {}

        keepnum = int(self.opts.keep) * (-1)  # the number of items to keep

        if len(rpm_list) == 0:
            raise dnf.exceptions.Error(_("No files to process"))

        try:
            self.base.add_remote_rpms(rpm_list)
        except IOError:
            logger.warning(_("Could not open {}").format(', '.join(rpm_list)))

        packages = [x for x in self.base.sack.query().available()]
        packages.sort()
        for pkg in packages:
            na = (pkg.name, pkg.arch)
            if na in pkgdict:
                pkgdict[na].append(pkg)
            else:
                pkgdict[na] = [pkg]

            nevra = self._package_to_nevra(pkg)
            if nevra in verfile:
                verfile[nevra].append(self._package_to_path(pkg))
            else:
                verfile[nevra] = [self._package_to_path(pkg)]

        outputpackages = []

        # if new
        if not self.opts.old:
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                if len(evrlist) < abs(keepnum):
                    newevrs = evrlist
                else:
                    newevrs = evrlist[keepnum:]

                for package in newevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

        if self.opts.old:
            for (n, a) in pkgdict.keys():
                evrlist = pkgdict[(n, a)]

                if len(evrlist) < abs(keepnum):
                    continue

                oldevrs = evrlist[:keepnum]
                for package in oldevrs:
                    nevra = self._package_to_nevra(package)
                    for fpkg in verfile[nevra]:
                        outputpackages.append(fpkg)

        outputpackages.sort()
        if self.opts.space:
            print(" ".join(outputpackages))
        else:
            for pkg in outputpackages:
                print(pkg)