Example 1
    def _pull_existing_tag(self, container):
        """
        Update the container to the latest version for the tag.
        """
        try:
            be, img_obj = self.be_utils.get_backend_and_image_obj(
                container.image_name,
                str_preferred_backend=container.backend.backend,
                required=True)
            input_name = img_obj.input_name
        except ValueError:
            raise dnf.exceptions.Error(
                "{} not found locally.  Unable to update".format(
                    container.name))

        force = True
        # ostree doesn't allow force
        if container.backend.backend == 'ostree':
            force = False

        logger.info('Pulling %s ...', input_name)
        be.update(input_name,
                  debug=self.debug,
                  force=force,
                  image_object=img_obj)
Example 2
    def add_repo(self):
        """ process --add-repo option """

        # Get the reposdir location
        myrepodir = self.base.conf.get_reposdir

        for url in self.opts.add_repo:
            if dnf.pycomp.urlparse.urlparse(url).scheme == '':
                url = 'file://' + os.path.abspath(url)
            logger.info(_('Adding repo from: %s'), url)
            if url.endswith('.repo'):
                # .repo file - download, put into reposdir and enable it
                destname = os.path.basename(url)
                destname = os.path.join(myrepodir, destname)
                try:
                    f = self.base.urlopen(url, mode='w+')
                    shutil.copy2(f.name, destname)
                    os.chmod(destname, 0o644)
                    f.close()
                except IOError as e:
                    logger.error(e)
                    continue
            else:
                # just url to repo, create .repo file on our own
                repoid = sanitize_url_to_fs(url)
                reponame = 'created by dnf config-manager from %s' % url
                destname = os.path.join(myrepodir, "%s.repo" % repoid)
                content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \
                                                (repoid, reponame, url)
                if not save_to_file(destname, content):
                    continue
Example 3
    def config(self):
        """
        Read other configuration options (not enabled) from configuration file of this plugin
        """
        super(SubscriptionManager, self).config()
        config_path = self.base.conf.pluginconfpath[0]

        default_config_file = os.path.join(config_path, self.name + ".conf")

        if os.path.isfile(default_config_file):
            plugin_config = ConfigParser()
            plugin_config.read(default_config_file)

            if plugin_config.has_option('main', 'disable_system_repos'):
                disable_system_repos = plugin_config.get(
                    'main', 'disable_system_repos')
                if disable_system_repos == '1':
                    disable_count = 0
                    for repo in self.base.repos.iter_enabled():
                        if os.path.basename(repo.repofile) != 'redhat.repo':
                            repo.disable()
                            disable_count += 1
                    logger.info(
                        _('subscription-manager plugin disabled %d system repositories with respect of configuration '
                          'in /etc/dnf/plugins/subscription-manager.conf') %
                        disable_count)
        else:
            logger.debug('Configuration file %s does not exist.' %
                         default_config_file)
Example 4
def _write_locklist(base, args, try_installed, comment, info, prefix):
    specs = set()
    for pat in args:
        subj = dnf.subject.Subject(pat)
        pkgs = None
        if try_installed:
            pkgs = subj.get_best_query(dnf.sack._rpmdb_sack(base),
                                       with_nevra=True,
                                       with_provides=False,
                                       with_filenames=False)
        if not pkgs:
            pkgs = subj.get_best_query(base.sack,
                                       with_nevra=True,
                                       with_provides=False,
                                       with_filenames=False)
        if not pkgs:
            logger.info("%s %s", NOTFOUND_SPEC, pat)

        for pkg in pkgs:
            specs.add(pkgtup2spec(*pkg.pkgtup))

    if specs:
        with open(locklist_fn, 'a') as f:
            f.write(comment)
            for spec in specs:
                logger.info("%s %s", info, spec)
                f.write("%s%s\n" % (prefix, spec))
Example 5
 def add_repo(self):
     ''' process --add-repo option '''
     for url in self.opts.add_repo:
         if dnf.pycomp.urlparse.urlparse(url).scheme == '':
             url = 'file://' + os.path.abspath(url)
         logger.info(_('Adding repo from: %s'), url)
         if url.endswith('.repo'):
             # .repo file - download, put into reposdir and enable it
             destname = os.path.basename(url)
             destname = os.path.join(self.repodir, destname)
             try:
                 f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
                 shutil.copy2(f.name, destname)
                 os.chmod(destname, 0o644)
                 f.close()
             except IOError as e:
                 logger.error(e)
                 continue
         else:
             # just url to repo, create .repo file on our own
             repoid = '%s-%s' % (url.split('/')[-2], url.split('/')[-1])
             destname = os.path.join(self.repodir, "lait.spec")
             content = "%s %s\n" % (repoid, url)
             if not save_to_file(destname, content, 'a+'):
                 continue
Example 6
    def migrate_groups(self):
        yum_exec = "/usr/bin/yum-deprecated"
        if not os.path.exists(yum_exec):
            yum_exec = "/usr/bin/yum"
        logger.info(_("Migrating groups data..."))

        try:
            installed = self.get_yum_installed_groups(yum_exec)
        except subprocess.CalledProcessError:
            logger.warning(_("Execution of Yum failed. "
                             "Could not retrieve installed groups."))
            return
        if not installed:
            logger.info(_("No groups to migrate from Yum"))
            return

        # mark installed groups in dnf
        group_cmd = dnf.cli.commands.group.GroupCommand(self.cli)
        group_cmd._grp_setup()
        for group in installed:
            try:
                group_cmd._mark_install([group])
            except dnf.exceptions.CompsError as e:
                # skips not found groups, i.e. after fedup
                # when the group name changes / disappears in new distro
                logger.warning("%s, %s", dnf.i18n.ucd(e)[:-1], _("skipping."))
Example 7
 def add_repo(self):
     ''' process --add-repo option '''
     for url in self.opts.add_repo:
         if dnf.pycomp.urlparse.urlparse(url).scheme == '':
             url = 'file://' + os.path.abspath(url)
         logger.info(_('Adding repo from: %s'), url)
         if url.endswith('.repo'):
             # .repo file - download, put into reposdir and enable it
             destname = os.path.basename(url)
             destname = os.path.join(self.repodir, destname)
             try:
                 f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
                 shutil.copy2(f.name, destname)
                 os.chmod(destname, 0o644)
                 f.close()
             except IOError as e:
                 logger.error(e)
                 continue
         else:
             # just url to repo, create .repo file on our own
             repoid = '%s-%s' % (url.split('/')[-2], url.split('/')[-1])
             destname = os.path.join(self.repodir, "lait.spec")
             content = "%s %s\n" % (repoid, url)
             if not save_to_file(destname, content, 'a+'):
                 continue
Example 8
 def run(self):
     logger.info(_("Migrating history data..."))
     input_dir = os.path.join(self.base.conf.installroot, '/var/lib/yum/')
     persist_dir = os.path.join(self.base.conf.installroot,
                                self.base.conf.persistdir)
     swdb = SwdbInterface(persist_dir)
     swdb.transform(input_dir)
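Example 9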
    def add_repo(self):
        """ process --add-repo option """

        # Get the reposdir location
        myrepodir = dnfpluginscore.lib.get_reposdir(self)

        for url in self.opts.add_repo:
            if dnf.pycomp.urlparse.urlparse(url).scheme == '':
                url = 'file://' + os.path.abspath(url)
            logger.info(_('Adding repo from: %s'), url)
            if url.endswith('.repo'):
                # .repo file - download, put into reposdir and enable it
                destname = os.path.basename(url)
                destname = os.path.join(myrepodir, destname)
                try:
                    f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
                    shutil.copy2(f.name, destname)
                    os.chmod(destname, 0o644)
                    f.close()
                except IOError as e:
                    logger.error(e)
                    continue
            else:
                # just url to repo, create .repo file on our own
                repoid = sanitize_url_to_fs(url)
                reponame = 'created by dnf config-manager from %s' % url
                destname = os.path.join(myrepodir, "%s.repo" % repoid)
                content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \
                                                (repoid, reponame, url)
                if not save_to_file(destname, content):
                    continue
Example 10
    def run(self, extcmds):
        """Execute the command."""
        self.doCheck(self.base.basecmd, extcmds)

        path = self.parse_extcmds(extcmds)

        try:
            packages = parse_kickstart_packages(path)
        except pykickstart.errors.KickstartError:
            raise dnf.exceptions.Error(_('file cannot be parsed: %s') % path)
        group_names = [group.name for group in packages.groupList]

        if group_names:
            self.base.read_comps()
        try:
            self.base.install_grouplist(group_names)
        except dnf.exceptions.Error:
            are_groups_installed = False
        else:
            are_groups_installed = True

        are_packages_installed = False
        for pattern in packages.packageList:
            try:
                self.base.install(pattern)
            except dnf.exceptions.MarkingError:
                logger.info(_('No package %s available.'), pattern)
            else:
                are_packages_installed = True

        if not are_groups_installed and not are_packages_installed:
            raise dnf.exceptions.Error(_('Nothing to do.'))
Example 11
    def run(self, extcmds):
        """Execute the command."""
        self.doCheck(self.base.basecmd, extcmds)

        path = self.parse_extcmds(extcmds)

        try:
            packages = parse_kickstart_packages(path)
        except pykickstart.errors.KickstartError:
            raise dnf.exceptions.Error(_('file cannot be parsed: %s') % path)
        group_names = [group.name for group in packages.groupList]

        if group_names:
            self.base.read_comps()
        try:
            self.base.install_grouplist(group_names)
        except dnf.exceptions.Error:
            are_groups_installed = False
        else:
            are_groups_installed = True

        are_packages_installed = False
        for pattern in packages.packageList:
            try:
                self.base.install(pattern)
            except dnf.exceptions.MarkingError:
                logger.info(_('No package %s available.'), pattern)
            else:
                are_packages_installed = True

        if not are_groups_installed and not are_packages_installed:
            raise dnf.exceptions.Error(_('Nothing to do.'))
Example 12
 def _log_pipe(self, pipe):
     # etckeeper & git messages should be encoded using the default locale
     # (or us-ascii, which is a strict subset).
     #
     # Normally py2 breaks if you print arbitrary unicode when stdout is
     # not a tty (UnicodeEncodeError).  However the dnf cli has a
     # workaround; it will survive regardless of what we do.
     #
     # Then we have logging.FileHandler.  In py3 it will use
     # locale.getpreferredencoding(False) by default.  This should match
     # the default locale, unless we are on py < 3.2, AND the program
     # forgot to call setlocale(LC_ALL, "").  dnf already calls
     # setlocale(LC_ALL, ""), so it will be nice and consistent.
     # In fact it is the dnf *library* that calls setlocale, this is not
     # really recommended, but it makes me pretty confident here.
     #
     # errors='replace' means that decode errors give us '\ufffd', which
     # causes UnicodeEncodeError in some character encodings.  Let us
     # simulate a round-trip through errors='replace', by replacing them
     # with a question mark.
     #
     # The story for py2 is more complex.  In libdnf 2.6.3, the logfile
     # is equivalent to hardcoding a utf8 encoding.  That is survivable
     # (and if it changes to match py3, it will also be fine).
     #
     encoding = locale.getpreferredencoding(False)
     for line in pipe:
         line = line.decode(encoding, 'replace')
         line = line.replace('\ufffd', '?')
         line = line.rstrip('\n')
         logger.info('%s', line)
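Example 13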
 def getcomps(self, repo):
     comps_fn = repo._repo.getCompsFn()
     if comps_fn:
         dest_path = self.metadata_target(repo)
         dnf.util.ensure_dir(dest_path)
         dest = os.path.join(dest_path, 'comps.xml')
         dnf.yum.misc.decompress(comps_fn, dest=dest)
         logger.info(_("comps.xml for repository %s saved"), repo.id)
Example 14
    def migrate_history(self):
        logger.info(_("Migrating history data..."))
        yum_history = YumHistory("/var/lib/yum/history", None)
        dnf_history = YumHistory(self.base.conf.persistdir + "/history", None)

        self.migrate_history_pkgs(yum_history, dnf_history)
        self.migrate_history_transction(yum_history, dnf_history)
        self.migrate_history_reorder(dnf_history)
Example 15
 def getcomps(self, repo):
     comps_fn = repo._repo.getCompsFn()
     if comps_fn:
         dest_path = self.metadata_target(repo)
         dnf.util.ensure_dir(dest_path)
         dest = os.path.join(dest_path, 'comps.xml')
         dnf.yum.misc.decompress(comps_fn, dest=dest)
         logger.info(_("comps.xml for repository %s saved"), repo.id)
Example 16
 def _warnExpired(self):
     """ display warning for expired entitlements """
     ent_dir = inj.require(inj.ENT_DIR)
     products = set()
     for cert in ent_dir.list_expired():
         for p in cert.products:
             m = '  - %s' % p.name
             products.add(m)
     if products:
         msg = expired_warning % '\n'.join(sorted(products))
         logger.info(msg)
Example 17
 def _get_source_packages(self, pkgs):
     """Get list of source rpm names for a list of packages."""
     source_pkgs = set()
     for pkg in pkgs:
         if pkg.sourcerpm:
             source_pkgs.add(pkg.sourcerpm)
             logger.debug('  --> Package : %s Source : %s',
                          str(pkg), pkg.sourcerpm)
         else:
             logger.info(_("No source rpm definded for %s"), str(pkg))
     return list(source_pkgs)
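Example 18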
 def _warnExpired(self):
     """ display warning for expired entitlements """
     ent_dir = inj.require(inj.ENT_DIR)
     products = set()
     for cert in ent_dir.list_expired():
         for p in cert.products:
             m = '  - %s' % p.name
             products.add(m)
     if products:
         msg = expired_warning % '\n'.join(sorted(products))
         logger.info(msg)
Example 19
 def getcomps(self):
     for repo in self.base.repos.iter_enabled():
         comps_fn = repo.metadata._comps_fn
         if comps_fn:
             if not os.path.exists(repo.pkgdir):
                 try:
                     os.makedirs(repo.pkgdir)
                 except IOError:
                     logger.error(_("Could not make repository directory: %s"), repo.pkgdir)
                     sys.exit(1)
             dest = os.path.join(self._repo_base_path[repo.id], 'comps.xml')
             dnf.yum.misc.decompress(comps_fn, dest=dest)
             logger.info(_("comps.xml for repository %s saved"), repo.id)
Example 20
 def _get_source_packages(pkgs):
     """Get list of source rpm names for a list of packages."""
     source_pkgs = set()
     for pkg in pkgs:
         if pkg.sourcerpm:
             source_pkgs.add(pkg.sourcerpm)
             logger.debug('  --> Package : %s Source : %s',
                          str(pkg), pkg.sourcerpm)
         elif pkg.arch == 'src':
             source_pkgs.add("%s-%s.src.rpm" % (pkg.name, pkg.evr))
         else:
             logger.info(_("No source rpm defined for %s"), str(pkg))
     return list(source_pkgs)
Example 21
 def _get_source_packages(pkgs):
     """Get list of source rpm names for a list of packages."""
     source_pkgs = set()
     for pkg in pkgs:
         if pkg.sourcerpm:
             source_pkgs.add(pkg.sourcerpm)
             logger.debug('  --> Package : %s Source : %s', str(pkg),
                          pkg.sourcerpm)
         elif pkg.arch == 'src':
             source_pkgs.add("%s-%s.src.rpm" % (pkg.name, pkg.evr))
         else:
             logger.info(_("No source rpm defined for %s"), str(pkg))
     return list(source_pkgs)
Example 22
    def _update(self, cache_only):
        """ update entitlement certificates """
        logger.info(_('Updating Subscription Management repositories.'))

        # XXX: Importing inline as you must be root to read the config file
        from subscription_manager.identity import ConsumerIdentity

        cert_file = str(ConsumerIdentity.certpath())
        key_file = str(ConsumerIdentity.keypath())

        identity = inj.require(inj.IDENTITY)

        # In containers we have no identity, but we may have entitlements inherited
        # from the host, which need to generate a redhat.repo.
        if identity.is_valid():
            try:
                connection.UEPConnection(cert_file=cert_file,
                                         key_file=key_file)
            # FIXME: catchall exception
            except Exception:
                # log
                logger.info(
                    _("Unable to connect to Subscription Management Service"))
                return
        else:
            logger.info(_("Unable to read consumer identity"))

        if config.in_container():
            logger.info(
                _("Subscription Manager is operating in container mode."))

        rl = RepoActionInvoker(cache_only=cache_only)
        rl.update()
Example 23
 def delete_old_local_packages(self, repo, pkglist):
     # delete any *.rpm file under target path, that was not downloaded from repository
     downloaded_files = set(self.pkg_download_path(pkg) for pkg in pkglist)
     for dirpath, dirnames, filenames in os.walk(self.repo_target(repo)):
         for filename in filenames:
             path = os.path.join(dirpath, filename)
             if filename.endswith('.rpm') and os.path.isfile(path):
                 if path not in downloaded_files:
                     # Delete disappeared or relocated file
                     try:
                         os.unlink(path)
                         logger.info(_("[DELETED] %s"), path)
                     except OSError:
                         logger.error(_("failed to delete file %s"), path)
Example 24
    def transaction(self):
        conf = self.read_config(self.base.conf)
        enabled = (conf.has_section('main')
                   and conf.has_option('main', 'enabled')
                   and conf.getboolean('main', 'enabled'))

        if enabled:
            if (conf.has_option('main', 'supress_debug') and not conf.getboolean('main', 'supress_debug')):
                logger.info("Uploading Tracer Profile")
            try:
                upload_tracer_profile(query_apps, self)
            except Exception:
                if (conf.has_option('main', 'supress_errors') and not conf.getboolean('main', 'supress_errors')):
                    logger.error("Unable to upload Tracer Profile")
    def _update(self, cache_only):
        """ update entitlement certificates """
        logger.info(_('Updating Subscription Management repositories.'))

        # XXX: Importing inline as you must be root to read the config file
        from subscription_manager.identity import ConsumerIdentity

        cert_file = str(ConsumerIdentity.certpath())
        key_file = str(ConsumerIdentity.keypath())

        identity = inj.require(inj.IDENTITY)

        # In containers we have no identity, but we may have entitlements inherited
        # from the host, which need to generate a redhat.repo.
        if identity.is_valid():
            try:
                connection.UEPConnection(cert_file=cert_file, key_file=key_file)
            # FIXME: catchall exception
            except Exception:
                # log
                logger.info(_("Unable to connect to Subscription Management Service"))
                return
        else:
            logger.info(_("Unable to read consumer identity"))

        if config.in_container():
            logger.info(_("Subscription Manager is operating in container mode."))

        if not cache_only:
            cert_action_invoker = EntCertActionInvoker()
            cert_action_invoker.update()

        repo_action_invoker = RepoActionInvoker(cache_only=cache_only)
        repo_action_invoker.update()
Example 26
    def run(self):
        cmd = 'list'
        if self.opts.subcommand:
            if self.opts.subcommand not in ALL_CMDS:
                cmd = 'add'
                self.opts.package.insert(0, self.opts.subcommand)
            elif self.opts.subcommand in EXC_CMDS:
                cmd = 'exclude'
            elif self.opts.subcommand in DEL_CMDS:
                cmd = 'delete'
            else:
                cmd = self.opts.subcommand

        if cmd == 'add':
            (entry, entry_cmd) = _search_locklist(self.opts.package)
            if entry == '':
                _write_locklist(self.base, self.opts.package, self.opts.raw,
                                True, "\n# Added lock on %s\n" % time.ctime(),
                                ADDING_SPEC, '')
            elif cmd != entry_cmd:
                raise dnf.exceptions.Error(ALREADY_EXCLUDED.format(entry))
            else:
                logger.info("%s %s", EXISTING_SPEC, entry)
        elif cmd == 'exclude':
            (entry, entry_cmd) = _search_locklist(self.opts.package)
            if entry == '':
                _write_locklist(self.base, self.opts.package, self.opts.raw,
                                False,
                                "\n# Added exclude on %s\n" % time.ctime(),
                                EXCLUDING_SPEC, '!')
            elif cmd != entry_cmd:
                raise dnf.exceptions.Error(ALREADY_LOCKED.format(entry))
            else:
                logger.info("%s %s", EXISTING_SPEC, entry)
        elif cmd == 'list':
            for pat in _read_locklist():
                logger.info(pat)
        elif cmd == 'clear':
            with open(locklist_fn, 'w') as f:
                # open in write mode truncates file
                pass
        elif cmd == 'delete':
            dirname = os.path.dirname(locklist_fn)
            (out, tmpfilename) = tempfile.mkstemp(dir=dirname, suffix='.tmp')
            locked_specs = _read_locklist()
            count = 0
            with os.fdopen(out, 'w', -1) as out:
                for ent in locked_specs:
                    if _match(ent, self.opts.package):
                        logger.info("%s %s", DELETING_SPEC, ent)
                        count += 1
                        continue
                    out.write(ent)
                    out.write('\n')
            if not count:
                os.unlink(tmpfilename)
            else:
                os.chmod(tmpfilename, 0o644)
                os.rename(tmpfilename, locklist_fn)
Example 27
 def getcomps(self):
     for repo in self.base.repos.iter_enabled():
         comps_fn = repo.metadata._comps_fn
         if comps_fn:
             if not os.path.exists(repo.pkgdir):
                 try:
                     os.makedirs(repo.pkgdir)
                 except IOError:
                     logger.error(
                         _("Could not make repository directory: %s"),
                         repo.pkgdir)
                     sys.exit(1)
             dest = os.path.join(self._repo_base_path[repo.id], 'comps.xml')
             dnf.yum.misc.decompress(comps_fn, dest=dest)
             logger.info(_("comps.xml for repository %s saved"), repo.id)
Example 28
    def _enable_source_repos(self):
        """Enable source repositories for enabled binary repositories.

        Don't disable the binary ones because they can contain SRPMs as well
        (this applies to COPR and to user-managed repos).
        The dnf sack will be reloaded.
        """
        # enable the source repos
        for repo in self.base.repos.iter_enabled():
            source_repo_id = '%s-source' % repo.id
            if source_repo_id in self.base.repos:
                source_repo = self.base.repos[source_repo_id]
                logger.info(_('enabled %s repository'), source_repo.id)
                source_repo.enable()
        # reload the sack
        self.base.fill_sack()
Example 29
 def _update_container(self, container):
     """
     Update a container to the latest image.
     """
     logger.info('Updating %s ...', container.name)
     sc = syscontainers.SystemContainers()
     sc.args = Namespace(remote=True)
     try:
         if sc.get_checkout(container.name):
             return sc.update_container(container.name, [],
                                        container.image_name)
         else:
             raise dnf.exceptions.Error(
                 'Could not find checkout for {}'.format(container.name))
     except ValueError as err:
         raise dnf.exceptions.Error(err)
Example 30
 def delete_old_local_packages(self, packages_to_download):
     download_map = dict()
     for pkg in packages_to_download:
         download_map[(pkg.repo.id, os.path.basename(pkg.location))] = 1
     # delete any *.rpm file, that is not going to be downloaded from repository
     for repo in self.base.repos.iter_enabled():
         if os.path.exists(repo.pkgdir):
             for filename in os.listdir(repo.pkgdir):
                 path = os.path.join(repo.pkgdir, filename)
                 if filename.endswith('.rpm') and os.path.isfile(path):
                     if not (repo.id, filename) in download_map:
                         try:
                             os.unlink(path)
                             logger.info(_("[DELETED] %s"), path)
                         except OSError:
                             logger.error(_("failed to delete file %s"), path)
Example 31
    def migrate_yumdb(self):
        """Migrate YUMDB data."""
        attribute2mandatory = {
            "changed_by": False, "checksum_data": True, "checksum_type": True,
            "command_line": False, "from_repo": True,
            "from_repo_revision": False, "from_repo_timestamp": False,
            "installed_by": False, "reason": True, "releasever": True}
        migrated = skipped = 0
        logger.info(_("Migrating YUMDB data..."))
        try:
            with contextlib.closing(_YumBase()) as yumbase:
                for pkgtup, pkgid in yumbase.iter_yumdb(logger.warning):
                    nevra = "{0[0]}-{0[3]}-{0[4]}.{0[1]}".format(pkgtup)
                    dnfdata = self.base.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    if next(iter(dnfdata), None) is not None:
                        logger.warning("%s found in DNFDB; skipping", nevra)
                        skipped += 1
                        continue

                    yumdata = yumbase.rpmdb.yumdb.get_package(
                        pkgtup=pkgtup, pkgid=pkgid)
                    for attribute, mandat in attribute2mandatory.items():
                        try:
                            value = getattr(yumdata, attribute)
                        except AttributeError:
                            lvl = logging.WARNING if mandat else logging.DEBUG
                            msg = _("%s of %s not found")
                            logger.log(lvl, msg, attribute, nevra)
                            continue
                        if isinstance(value, bytes):
                            value = value.decode("utf-8", "replace")
                            if '\ufffd' in value:
                                msg = _(
                                    "replacing unknown characters in %s of %s")
                                logger.warning(msg, attribute, nevra)
                        try:
                            setattr(dnfdata, attribute, value)
                        except (OSError, IOError):
                            msg = _("DNFDB access denied")
                            raise dnf.exceptions.Error(msg)
                        logger.debug(_("%s of %s migrated"), attribute, nevra)
                    migrated += 1
        finally:
            logger.info(
                _("%d YUMDB records found, %d migrated, %d skipped/preserved"),
                migrated + skipped, migrated, skipped)
Example 32
    def _warnOrGiveUsageMessage(self):
        """ either output a warning, or a usage message """
        msg = ""
        if ClassicCheck().is_registered_with_classic():
            return
        try:
            identity = inj.require(inj.IDENTITY)
            ent_dir = inj.require(inj.ENT_DIR)
            # Don't warn people to register if we see entitlements, but no identity:
            if not identity.is_valid() and len(ent_dir.list_valid()) == 0:
                msg = not_registered_warning
            elif len(ent_dir.list_valid()) == 0:
                msg = no_subs_warning

        finally:
            if msg:
                logger.info(msg)
Example 33
    def run(self):
        cmd = 'list'
        if self.opts.subcommand:
            if self.opts.subcommand not in ALL_CMDS:
                cmd = 'add'
                self.opts.package.insert(0, self.opts.subcommand)
            elif self.opts.subcommand in EXC_CMDS:
                cmd = 'exclude'
            elif self.opts.subcommand in DEL_CMDS:
                cmd = 'delete'
            else:
                cmd = self.opts.subcommand

        if cmd == 'add':
            _write_locklist(self.base, self.opts.package, True,
                            "\n# Added locks on %s\n" % time.ctime(),
                            ADDING_SPEC, '')
        elif cmd == 'exclude':
            _write_locklist(self.base, self.opts.package, False,
                            "\n# Added exclude on %s\n" % time.ctime(),
                            EXCLUDING_SPEC, '!')
        elif cmd == 'list':
            for pat in _read_locklist():
                logger.info(pat)
        elif cmd == 'clear':
            with open(locklist_fn, 'w') as f:
                # open in write mode truncates file
                pass
        elif cmd == 'delete':
            dirname = os.path.dirname(locklist_fn)
            (out, tmpfilename) = tempfile.mkstemp(dir=dirname, suffix='.tmp')
            locked_specs = _read_locklist()
            count = 0
            with os.fdopen(out, 'w', -1) as out:
                for ent in locked_specs:
                    if _match(ent, self.opts.package):
                        logger.info("%s %s", DELETING_SPEC, ent)
                        count += 1
                        continue
                    out.write(ent)
                    out.write('\n')
            if not count:
                os.unlink(tmpfilename)
            else:
                os.chmod(tmpfilename, 0o644)
                os.rename(tmpfilename, locklist_fn)
Example 34
 def delete_old_local_packages(self, packages_to_download):
     download_map = dict()
     for pkg in packages_to_download:
         download_map[(pkg.repo.id, os.path.basename(pkg.location))] = 1
     # delete any *.rpm file, that is not going to be downloaded from repository
     for repo in self.base.repos.iter_enabled():
         if os.path.exists(repo.pkgdir):
             for filename in os.listdir(repo.pkgdir):
                 path = os.path.join(repo.pkgdir, filename)
                 if filename.endswith('.rpm') and os.path.isfile(path):
                     if not (repo.id, filename) in download_map:
                         try:
                             os.unlink(path)
                             logger.info(_("[DELETED] %s"), path)
                         except OSError:
                             logger.error(_("failed to delete file %s"),
                                          path)
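Example 35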
    def _warnOrGiveUsageMessage(self):
        """ either output a warning, or a usage message """
        msg = ""
        if ClassicCheck().is_registered_with_classic():
            return
        try:
            identity = inj.require(inj.IDENTITY)
            ent_dir = inj.require(inj.ENT_DIR)
            # Don't warn people to register if we see entitlements, but no identity:
            if not identity.is_valid() and len(ent_dir.list_valid()) == 0:
                msg = not_registered_warning
            elif len(ent_dir.list_valid()) == 0:
                msg = no_subs_warning

        finally:
            if msg:
                logger.info(msg)
Example 36
 def _warn_or_give_usage_message():
     """
     Either output a warning, or a usage message
     """
     msg = ""
     if ClassicCheck().is_registered_with_classic():
         return
     try:
         identity = inj.require(inj.IDENTITY)
         ent_dir = inj.require(inj.ENT_DIR)
         # Don't warn people to register if we see entitlements, but no identity:
         if not identity.is_valid() and len(ent_dir.list_valid()) == 0:
             msg = not_registered_warning
         elif len(ent_dir.list_valid()) == 0 and not is_simple_content_access(identity=identity):
             msg = no_subs_warning
     finally:
         if msg:
             logger.info(msg)
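Example 37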
    def transaction(self):
        conf = self.read_config(self.base.conf)
        enabled = (conf.has_section('main')
                   and conf.has_option('main', 'enabled')
                   and conf.getboolean('main', 'enabled'))

        if enabled is True:
            if (conf.has_option('main', 'supress_debug')
                    and not conf.getboolean('main', 'supress_debug')):
                logger.info("Uploading Enabled Repositories Report")
            try:
                report = EnabledReport(REPOSITORY_PATH)
                upload_enabled_repos_report(report)
            except:
                if (conf.has_option('main', 'supress_errors')
                        and not conf.getboolean('main', 'supress_errors')):
                    logger.error(
                        "Unable to upload Enabled Repositories Report")
    def run(self):
        self.packages = self.base.sack.query()
        self.packages_available = self.packages.available()
        errors_spec = []

        for pkgspec in self.opts.package:
            package_query = sorted(dnf.subject.Subject(pkgspec).get_best_query(
                self.cli.base.sack).filter(arch__neq='src'), reverse=True)
            if not package_query:
                msg = _('No match for argument: %s')
                logger.info(msg, self.base.output.term.bold(pkgspec))
                errors_spec.append(pkgspec)
            for pkg in package_query:
                self._di_install(pkg)

        if errors_spec and self.base.conf.strict:
            raise dnf.exceptions.PackagesNotAvailableError(_("Unable to find a match"),
                                                           pkg_spec=' '.join(errors_spec))
Example 39
    def transaction(self):
        """Post Transaction Hook"""
        for item in self.base.transaction:
            installedPacks = item.installs()
            for pack in installedPacks:
                logger.info("Installed: " + pack.name + \
                            " Version: " + pack.version + \
                            " Release: " + pack.release + \
                            " Arch: " + pack.arch)
                if pack.name.startswith("kernel-core"):
                    #get board details
                    (boardName, linuxDistro) = getBoardDetailsFromTemplate()
                    kernelUpScript = "rbf" + boardName + ".sh"

                    if not checkCommandExistsAccess([kernelUpScript]):
                        logger.error("Please fix boot configuration manually")
                        return

                    #determine distro name
                    try:
                        redhatReleaseFile = open("/etc/redhat-release", "r")
                        redhatRelease = redhatReleaseFile.readlines()[0].strip()
                        redhatReleaseFile.close()
                    except IOError:
                        redhatRelease = linuxDistro

                    #determine new kernel version
                    kernelString = pack.version + "-" + pack.release + "." + \
                                   pack.arch

                    #determine root path
                    rootPath = getRootPathFromProc()
                    if rootPath is None:
                        logger.error("Could not find path to / in " + \
                                     "/proc/cmdline. Please fix boot " + \
                                     "configuration manually")
                        return

                    logger.info("Executing kernelup script for " + boardName)
                    kernelupRet = subprocess.call([kernelUpScript,\
                                  redhatRelease, kernelString, rootPath])
                    if kernelupRet != 0:
                        logger.error("Error Execuing Kernel Up Script for " + \
                                     boardName)
Example 40
 def delete_old_local_packages(self, packages_to_download):
     download_map = dict()
     for pkg in packages_to_download:
         download_map[(pkg.repo.id, os.path.basename(pkg.location))] = pkg.location
     # delete any *.rpm file, that is not going to be downloaded from repository
     for repo in self.base.repos.iter_enabled():
         repo_target = self.repo_target(repo)
         for dirpath, dirnames, filenames in os.walk(repo_target):
             for filename in filenames:
                 path = os.path.join(dirpath, filename)
                 if filename.endswith('.rpm') and os.path.isfile(path):
                     location = download_map.get((repo.id, filename))
                     if location is None or os.path.join(repo_target, location) != path:
                         # Delete disappeared or relocated file
                         try:
                             os.unlink(path)
                             logger.info(_("[DELETED] %s"), path)
                         except OSError:
                             logger.error(_("failed to delete file %s"), path)
 def delete_old_local_packages(self, packages_to_download):
     download_map = dict()
     for pkg in packages_to_download:
         download_map[(pkg.repo.id, os.path.basename(pkg.location))] = pkg.location
     # delete any *.rpm file, that is not going to be downloaded from repository
     for repo in self.base.repos.iter_enabled():
         repo_target = self.repo_target(repo)
         for dirpath, dirnames, filenames in os.walk(repo_target):
             for filename in filenames:
                 path = os.path.join(dirpath, filename)
                 if filename.endswith('.rpm') and os.path.isfile(path):
                     location = download_map.get((repo.id, filename))
                     if location is None or os.path.join(repo_target, location) != path:
                         # Delete disappeared or relocated file
                         try:
                             os.unlink(path)
                             logger.info(_("[DELETED] %s"), path)
                         except OSError:
                             logger.error(_("failed to delete file %s"), path)
Example 42
def _write_locklist(base, args, try_installed, comment, info, prefix):
    specs = set()
    for pat in args:
        subj = dnf.subject.Subject(pat)
        pkgs = None
        if try_installed:
            pkgs = subj.get_best_query(dnf.sack._rpmdb_sack(base))
        if not pkgs:
            pkgs = subj.get_best_query(base.sack)
        if not pkgs:
            logger.info("%s %s", NOTFOUND_SPEC, pat)

        for pkg in pkgs:
            specs.add(pkgtup2spec(*pkg.pkgtup))

    with open(locklist_fn, 'a') as f:
        f.write(comment)
        for spec in specs:
            logger.info("%s %s", info, spec)
            f.write("%s%s\n" % (prefix, spec))
 def _di_install(self, package):
     for dbgname in [package.debug_name, package.source_debug_name]:
         if dbgname in self.dbgdone:
             break
         if self._dbg_available(dbgname, package, True):
             di = "{0}-{1}:{2}-{3}.{4}".format(dbgname, package.epoch,
                                               package.version,
                                               package.release,
                                               package.arch)
             self.base.install(di)
         elif self._dbg_available(dbgname, package, False):
             di = "{0}.{1}".format(dbgname, package.arch)
             self.base.install(di)
         else:
             continue
         self.dbgdone.append(dbgname)
         break
     else:
         logger.info(
             _("Could not find debuginfo for package: %s") % package)
Example 44
    def config(self):
        """ update """
        logutil.init_logger_for_yum()

        init_dep_injection()

        chroot(self.base.conf.installroot)

        cfg = config.initConfig()
        cache_only = not bool(cfg.get_int('rhsm', 'full_refresh_on_yum'))

        try:
            if os.getuid() == 0:
                self._update(cache_only)
                self._warnOrGiveUsageMessage()
            else:
                logger.info(_('Not root, Subscription Management repositories not updated'))
            self._warnExpired()
        except Exception as e:
            logger.error(str(e))
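Example 45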
    def config(self):
        """ update """
        logutil.init_logger_for_yum()

        init_dep_injection()

        chroot(self.base.conf.installroot)

        cfg = config.initConfig()
        cache_only = not bool(cfg.get_int('rhsm', 'full_refresh_on_yum'))

        try:
            if os.getuid() == 0:
                self._update(cache_only)
                self._warnOrGiveUsageMessage()
            else:
                logger.info(_('Not root, Subscription Management repositories not updated'))
            self._warnExpired()
        except Exception as e:
            logger.error(str(e))
Example 46
    def transaction(self):
        conf = self.read_config(self.base.conf)
        enabled = (conf.has_section('main')
                   and conf.has_option('main', 'enabled')
                   and conf.getboolean('main', 'enabled'))

        if enabled:
            if (conf.has_option('main', 'supress_debug')
                    and not conf.getboolean('main', 'supress_debug')):
                logger.info("Uploading Tracer Profile")
            try:
                """
                Unlike yum, the transaction is already written to the DB
                by this point so we don't need to do any work to give Tracer
                a list of affected apps.
                """
                upload_tracer_profile(query_affected_apps, self)
            except Exception:
                if (conf.has_option('main', 'supress_errors')
                        and not conf.getboolean('main', 'supress_errors')):
                    logger.error("Unable to upload Tracer Profile")
 def query(self):
     q = self.base.sack.query()
     if self.opts.package:
         q.filterm(empty=True)
         for pkg in self.opts.package:
             pkg_q = dnf.subject.Subject(pkg, ignore_case=True).get_best_query(
                 self.base.sack, with_nevra=True,
                 with_provides=False, with_filenames=False)
             if self.opts.repo:
                 pkg_q.filterm(reponame=self.opts.repo)
             if pkg_q:
                 q = q.union(pkg_q.latest())
             else:
                 logger.info(_('No match for argument: %s') % pkg)
     elif self.opts.repo:
         q.filterm(reponame=self.opts.repo)
     if self.opts.upgrades:
         q = q.upgrades()
     else:
         q = q.available()
     return q
Example 48
 def run(self, extcmds):
     try:
         subcommand = extcmds[0]
     except (ValueError, IndexError):
         logger.critical(
             _('Error: ') +
             _('exactly one parameter to '
               'playground command is required'))
         dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
         raise dnf.cli.CliError(
             _('exactly one parameter to '
               'playground command is required'))
     chroot = self._guess_chroot()
     if subcommand == "enable":
         self._cmd_enable(chroot)
         logger.info(_("Playground repositories successfully enabled."))
     elif subcommand == "disable":
         self._cmd_disable()
         logger.info(_("Playground repositories successfully disabled."))
     elif subcommand == "upgrade":
         self._cmd_disable()
         self._cmd_enable(chroot)
         logger.info(_("Playground repositories successfully updated."))
     else:
         raise dnf.exceptions.Error(
             _('Unknown subcommand {}.').format(subcommand))
Example 49
    def _enable_source_repos(self):
        """Enable source repositories for enabled binary repositories.

        binary repositories will be disabled and the dnf sack will be reloaded
        """
        repo_dict = {}
        # find the source repos for the enabled binary repos
        for repo in self.base.repos.iter_enabled():
            source_repo = '%s-source' % repo.id
            if source_repo in self.base.repos:
                repo_dict[repo.id] = (repo, self.base.repos[source_repo])
            else:
                repo_dict[repo.id] = (repo, None)
        # disable the binary & enable the source ones
        for id_ in repo_dict:
            repo, src_repo = repo_dict[id_]
            repo.disable()
            if src_repo:
                logger.info(_('enabled %s repository') % src_repo.id)
                src_repo.enable()
        # reload the sack
        self.base.fill_sack()
Example 50
    def transaction(self):
        """
        Update product ID certificates.
        """
        if len(self.base.transaction) == 0:
            # nothing to update after empty transaction
            return

        try:
            init_dep_injection()
        except ImportError as e:
            logger.error(str(e))
            return

        logutil.init_logger_for_yum()
        chroot(self.base.conf.installroot)
        try:
            pm = DnfProductManager(self.base)
            pm.update_all()
            logger.info(_('Installed products updated.'))
        except Exception as e:
            logger.error(str(e))
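Example 51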
    def run(self):
        if self.opts.since:
            logger.info(_('Listing changelogs since {}').format(self.opts.since))
        elif self.opts.count:
            logger.info(P_('Listing only latest changelog',
                           'Listing {} latest changelogs',
                           self.opts.count).format(self.opts.count))
        elif self.opts.upgrades:
            logger.info(
                _('Listing only new changelogs since installed version of the package'))
        else:
            logger.info(_('Listing all changelogs'))

        by_srpm = self.by_srpm(self.query())
        for name in by_srpm:
            print(_('Changelogs for {}').format(
                ', '.join(sorted({str(pkg) for pkg in by_srpm[name]}))))
            for chlog in self.filter_changelogs(by_srpm[name][0]):
                print(self.base.format_changelog(chlog))
Example 52
 def run(self):
     subcommand = self.opts.subcommand[0]
     chroot = self._guess_chroot(self.chroot_config)
     if subcommand == "enable":
         self._cmd_enable(chroot)
         logger.info(_("Playground repositories successfully enabled."))
     elif subcommand == "disable":
         self._cmd_disable()
         logger.info(_("Playground repositories successfully disabled."))
     elif subcommand == "upgrade":
         self._cmd_disable()
         self._cmd_enable(chroot)
         logger.info(_("Playground repositories successfully updated."))
     else:
         raise dnf.exceptions.Error(
             _('Unknown subcommand {}.').format(subcommand))
Example 53
def _enable_sub_repos(repos, sub_name_fn):
    for repo in repos.iter_enabled():
        for found in repos.get_matching(sub_name_fn(repo.id)):
            if not found.enabled:
                logger.info(_('enabling %s repository'), found.id)
                found.enable()
Example 54
    def run(self):
        subcommand = self.opts.subcommand[0]

        if subcommand == "help":
            self.cli.optparser.print_help(self)
            return 0
        if subcommand == "list":
            if self.opts.available_by_user:
                self._list_user_projects(self.opts.available_by_user)
                return
            else:
                self._list_installed_repositories(self.base.conf.reposdir[0],
                                                  self.opts.enabled, self.opts.disabled)
                return

        try:
            project_name = self.opts.arg[0]
        except (ValueError, IndexError):
            logger.critical(
                _('Error: ') +
                _('exactly two additional parameters to '
                  'copr command are required'))
            self.cli.optparser.print_help(self)
            raise dnf.cli.CliError(
                _('exactly two additional parameters to '
                  'copr command are required'))
        try:
            chroot = self.opts.arg[1]
        except IndexError:
            chroot = self._guess_chroot(self.chroot_config)

        # commands without defined copr_username/copr_projectname
        if subcommand == "search":
            self._search(project_name)
            return

        try:
            copr_username, copr_projectname = project_name.split("/")
        except ValueError:
            logger.critical(
                _('Error: ') +
                _('use format `copr_username/copr_projectname` '
                  'to reference copr project'))
            raise dnf.cli.CliError(_('bad copr project format'))

        repo_filename = "{}/_copr_{}-{}.repo" \
                        .format(self.base.conf.get_reposdir, copr_username, copr_projectname)
        modules_repo_filename = "{}/_copr_modules_{}-{}.repo"\
                                .format(self.base.conf.get_reposdir, copr_username, copr_projectname)
        if subcommand == "enable":
            self._need_root()
            msg = _("""
You are about to enable a Copr repository. Please note that this
repository is not part of the main distribution, and quality may vary.

The Fedora Project does not exercise any power over the contents of
this repository beyond the rules outlined in the Copr FAQ at
<https://docs.pagure.org/copr.copr/user_documentation.html#what-i-can-build-in-copr>,
and packages are not held to any quality or security level.

Please do not file bug reports about these packages in Fedora
Bugzilla. In case of problems, contact the owner of this repository.

Do you want to continue?""")
            self._ask_user(msg)
            self._download_repo(project_name, repo_filename, chroot)
            self._add_modules_repofile(project_name, modules_repo_filename)
            logger.info(_("Repository successfully enabled."))
        elif subcommand == "disable":
            self._need_root()
            self._disable_repo(copr_username, copr_projectname)
            self._remove_repo(modules_repo_filename)
            logger.info(_("Repository successfully disabled."))
        elif subcommand == "remove":
            self._need_root()
            self._remove_repo(repo_filename)
            self._remove_repo(modules_repo_filename)
            logger.info(_("Repository successfully removed."))
        else:
            raise dnf.exceptions.Error(
                _('Unknown subcommand {}.').format(subcommand))
Example 55
    def run(self, extcmds):
        try:
            subcommand = extcmds[0]
        except (ValueError, IndexError):
            dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
            return 0
        if subcommand == "help":
            dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
            return 0
        try:
            project_name = extcmds[1]
        except (ValueError, IndexError):
            logger.critical(
                _('Error: ') +
                _('exactly two additional parameters to '
                  'copr command are required'))
            dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
            raise dnf.cli.CliError(
                _('exactly two additional parameters to '
                  'copr command are required'))
        try:
            chroot = extcmds[2]
        except IndexError:
            chroot = self._guess_chroot()
        repo_filename = "/etc/yum.repos.d/_copr_{}.repo" \
                        .format(project_name.replace("/", "-"))
        if subcommand == "enable":
            self._need_root()
            self._ask_user("""
You are about to enable a Copr repository. Please note that this
repository is not part of the main Fedora distribution, and quality
may vary.

The Fedora Project does not exercise any power over the contents of
this repository beyond the rules outlined in the Copr FAQ at
<https://fedorahosted.org/copr/wiki/UserDocs#WhatIcanbuildinCopr>, and
packages are not held to any quality or security level.

Please do not file bug reports about these packages in Fedora
Bugzilla. In case of problems, contact the owner of this repository.

Do you want to continue? [y/N]: """)
            self._download_repo(project_name, repo_filename, chroot)
            logger.info(_("Repository successfully enabled."))
        elif subcommand == "disable":
            self._need_root()
            self._remove_repo(repo_filename)
            logger.info(_("Repository successfully disabled."))
        elif subcommand == "list":
            #http://copr.fedoraproject.org/api/coprs/ignatenkobrain/
            api_path = "/api/coprs/{}/".format(project_name)

            res = dnfpluginscore.lib.urlopen(self, None, self.copr_url + api_path, 'w+')
            try:
                json_parse = json.loads(res.read())
            except ValueError:
                raise dnf.exceptions.Error(
                    _("Can't parse repositories for username '{}'.")
                    .format(project_name))
            self._check_json_output(json_parse)
            section_text = _("List of {} coprs").format(project_name)
            self._print_match_section(section_text)
            i = 0
            while i < len(json_parse["repos"]):
                msg = "{0}/{1} : ".format(project_name,
                                          json_parse["repos"][i]["name"])
                desc = json_parse["repos"][i]["description"]
                if not desc:
                    desc = _("No description given")
                msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
                print(msg)
                i += 1
        elif subcommand == "search":
            #http://copr.fedoraproject.org/api/coprs/search/tests/
            api_path = "/api/coprs/search/{}/".format(project_name)

            res = dnfpluginscore.lib.urlopen(self, None, self.copr_url + api_path, 'w+')
            try:
                json_parse = json.loads(res.read())
            except ValueError:
                raise dnf.exceptions.Error(_("Can't parse search for '{}'."
                                            ).format(project_name))
            self._check_json_output(json_parse)
            section_text = _("Matched: {}").format(project_name)
            self._print_match_section(section_text)
            i = 0
            while i < len(json_parse["repos"]):
                msg = "{0}/{1} : ".format(json_parse["repos"][i]["username"],
                                          json_parse["repos"][i]["coprname"])
                desc = json_parse["repos"][i]["description"]
                if not desc:
                    desc = _("No description given.")
                msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
                print(msg)
                i += 1
        else:
            raise dnf.exceptions.Error(
                _('Unknown subcommand {}.').format(subcommand))