Example #1
    def remove(self, module_specs):
        # :api
        no_match_specs = []
        remove_package_set = set()

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            module_dict = self._create_module_dict_and_enable(module_list, False)
            remove_packages_names = []
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    remove_packages_names.extend(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap, True))
            if not remove_packages_names:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            remove_package_set.update(remove_packages_names)

        if remove_package_set:
            keep_pkg_names = self.base._moduleContainer.getInstalledPkgNames()
            remove_package_set = remove_package_set.difference(keep_pkg_names)
            if remove_package_set:
                query = self.base.sack.query().installed().filterm(name=remove_package_set)
                if query:
                    self.base._remove_if_unneeded(query)
        return no_match_specs
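ModuleBase.remove() above carries the `# :api` marker, so it is part of dnf's public Python API. A minimal usage sketch, assuming a configured system with modular repositories; the spec 'nodejs:10/default' is a placeholder and error handling is omitted:

import dnf
import dnf.module.module_base

base = dnf.Base()
base.read_all_repos()
base.fill_sack()  # load repository and installed-package metadata

module_base = dnf.module.module_base.ModuleBase(base)
# remove() marks profile packages for removal and returns the specs it could not match
skipped = module_base.remove(['nodejs:10/default'])

base.resolve()          # build the removal transaction
base.do_transaction()   # apply it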
Example #2
 def _resolve_specs_enable_update_sack(self, module_specs):
     no_match_specs = []
     error_spec = []
     module_dicts = {}
     for spec in module_specs:
         module_list, nsvcap = self._get_modules(spec)
         if not module_list:
             no_match_specs.append(spec)
             continue
         try:
             module_dict = self._create_module_dict_and_enable(module_list, True)
             module_dicts[spec] = (nsvcap, module_dict)
         except (RuntimeError, EnableMultipleStreamsException) as e:
             error_spec.append(spec)
             logger.error(ucd(e))
             logger.error(_("Unable to resolve argument {}").format(spec))
     hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
     solver_errors = self.base.sack.filter_modules(
         self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot, None,
         self.base.conf.debug_solver
     )
     for nsvcap, moduleDict in module_dicts.values():
         for streamDict in moduleDict.values():
             for modules in streamDict.values():
                 self.base._moduleContainer.enableDependencyTree(
                     libdnf.module.VectorModulePackagePtr(modules))
     return no_match_specs, error_spec, solver_errors, module_dicts
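This helper does the heavy lifting behind the public enable() API: resolve the specs, enable the streams, re-filter the sack, and enable dependency trees. Reconstructed from its return values, the enable() wrapper looks roughly like the sketch below; treat it as an approximation rather than the authoritative upstream implementation:

 def enable(self, module_specs):
     # :api
     no_match_specs, error_specs, solver_errors, module_dicts = \
         self._resolve_specs_enable_update_sack(module_specs)
     for spec, (nsvcap, module_dict) in module_dicts.items():
         if nsvcap.profile:
             logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                 nsvcap.name, nsvcap.profile))
     if no_match_specs or error_specs or solver_errors:
         raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                            error_group_specs=error_specs,
                                            module_depsolv_errors=solver_errors)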
Example #3
 def run_on_module(self):
     try:
         self.module_base.disable(self.opts.module_spec)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             raise e
         logger.error(str(e))
    def remove(self, module_specs):
        no_match_specs = []
        remove_package_set = set()

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            module_dict = self._create_module_dict_and_enable(module_list, False)
            remove_packages_names = []
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    remove_packages_names.extend(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap, True))
            if not remove_packages_names:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            remove_package_set.update(remove_packages_names)

        if remove_package_set:
            keep_pkg_names = self.base._moduleContainer.getInstalledPkgNames()
            remove_package_set = remove_package_set.difference(keep_pkg_names)
            if remove_package_set:
                query = self.base.sack.query().installed().filterm(name=remove_package_set)
                if query:
                    self.base._remove_if_unneeded(query)
        return no_match_specs
Example #5
    def _modules_reset_or_disable(self, module_specs, to_state):
        no_match_specs = []
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                no_match_specs.append(spec)
                continue
            if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch or nsvcap.profile:
                logger.info(_("Only module name is required. "
                              "Ignoring unneeded information in argument: '{}'").format(spec))
            module_names = set()
            for module in module_list:
                module_names.add(module.getName())
            for name in module_names:
                if to_state == STATE_UNKNOWN:
                    self.base._moduleContainer.reset(name)
                if to_state == STATE_DISABLED:
                    self.base._moduleContainer.disable(name)

        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        try:
            solver_errors = self.base.sack.filter_modules(
                self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot,
                self.base.conf.module_platform_id, update_only=True,
                debugsolver=self.base.conf.debug_solver)
        except hawkey.Exception as e:
            raise dnf.exceptions.Error(ucd(e))
        return no_match_specs, solver_errors
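Example #5's helper is shared by the public reset() and disable() APIs, which differ only in the target state passed in. A hedged reconstruction of those thin wrappers, consistent with the (no_match_specs, solver_errors) return value above and with the exception handling in the run_on_module() snippets later in this section:

    def disable(self, module_specs):
        # :api
        no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_DISABLED)
        if no_match_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               module_depsolv_errors=solver_errors)

    def reset(self, module_specs):
        # :api
        no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_UNKNOWN)
        if no_match_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               module_depsolv_errors=solver_errors)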
Example #6
        def run_on_module(self):
            skipped_groups = self.module_base.remove(self.opts.module_spec)
            if self.opts.all:
                modules_from_specs = self._get_modules_from_name_stream_specs()
                remove_names_from_spec, __ = self._get_module_artifact_names(
                    modules_from_specs, set())
                keep_names, __ = self._get_module_artifact_names(
                    self.base._moduleContainer.getModulePackages(),
                    modules_from_specs)
                remove_query = self.base.sack.query().installed().filterm(
                    name=remove_names_from_spec)
                keep_query = self.base.sack.query().installed().filterm(
                    name=keep_names)
                for pkg in remove_query:
                    if pkg in keep_query:
                        msg = _(
                            "Package {} belongs to multiple modules, skipping"
                        ).format(pkg)
                        logger.info(msg)
                    else:
                        self.base.goal.erase(
                            pkg, clean_deps=self.base.conf.clean_requirements_on_remove)
            if not skipped_groups:
                return

            logger.error(
                dnf.exceptions.MarkingErrors(
                    no_match_group_specs=skipped_groups))
Example #7
    def _modules_reset_or_disable(self, module_specs, to_state):
        no_match_specs = []
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                no_match_specs.append(spec)
                continue
            if nsvcap.profile:
                logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
            module_names = set()
            for module in module_list:
                module_names.add(module.getName())
            for name in module_names:
                if to_state == STATE_UNKNOWN:
                    self.base._moduleContainer.reset(name)
                if to_state == STATE_DISABLED:
                    self.base._moduleContainer.disable(name)

        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        self.base.sack.filter_modules(self.base._moduleContainer, hot_fix_repos,
                                      self.base.conf.installroot, self.base.conf.module_platform_id,
                                      update_only=True)
        return no_match_specs
Example #8
    def _install_profiles_internal(self, install_set_artifacts, install_dict,
                                   strict):
        #  Remove source packages because they cannot be installed or upgraded
        base_no_source_query = self.base.sack.query().filterm(
            arch__neq=['src', 'nosrc']).apply()
        install_base_query = base_no_source_query.filter(
            nevra_strict=install_set_artifacts)
        error_specs = []

        # add hot-fix packages
        hot_fix_repos = [
            i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes
        ]
        hotfix_packages = base_no_source_query.filter(reponame=hot_fix_repos,
                                                      name=install_dict.keys())
        install_base_query = install_base_query.union(hotfix_packages)

        for pkg_name, set_specs in install_dict.items():
            query = install_base_query.filter(name=pkg_name)
            if not query:
                # package can also be non-modular or part of another stream
                query = base_no_source_query.filter(name=pkg_name)
                if not query:
                    for spec in set_specs:
                        logger.error(
                            _("Unable to resolve argument {}").format(spec))
                    logger.error(_("No match for package {}").format(pkg_name))
                    error_specs.extend(set_specs)
                    continue
            self.base._goal.group_members.add(pkg_name)
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(pkg=query)
            self.base._goal.install(select=sltr, optional=(not strict))
        return install_base_query, error_specs
    def _modules_reset_or_disable(self, module_specs, to_state):
        no_match_specs = []
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                no_match_specs.append(spec)
                continue
            if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch or nsvcap.profile:
                logger.info(_("Only module name is required. "
                              "Ignoring unneeded information in argument: '{}'").format(spec))
            module_names = set()
            for module in module_list:
                module_names.add(module.getName())
            for name in module_names:
                if to_state == STATE_UNKNOWN:
                    self.base._moduleContainer.reset(name)
                if to_state == STATE_DISABLED:
                    self.base._moduleContainer.disable(name)

        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        try:
            solver_errors = self.base.sack.filter_modules(
                self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot,
                self.base.conf.module_platform_id, update_only=True,
                debugsolver=self.base.conf.debug_solver)
        except hawkey.Exception as e:
            raise dnf.exceptions.Error(ucd(e))
        return no_match_specs, solver_errors
Example #10
 def run_on_module(self):
     try:
         self.module_base.reset(self.opts.module_spec)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs:
                 raise e
         logger.error(str(e))
Example #11
 def run_on_module(self):
     try:
         self.module_base.reset(self.opts.module_spec)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs:
                 raise e
         logger.error(str(e))
Example #12
        def run_on_module(self):
            skipped_groups = self.module_base.remove(self.opts.module_spec)
            if not skipped_groups:
                return

            logger.error(
                dnf.exceptions.MarkingErrors(
                    no_match_group_specs=skipped_groups))
Example #13
 def run_on_module(self):
     try:
         self.module_base.install(self.opts.module_spec, self.base.conf.strict)
     except ModuleMarkingError as e:
         no_match_specs = e.no_match_specs
         if no_match_specs:
             for spec in no_match_specs:
                 logger.error(_("Unable to resolve argument {}").format(spec))
         if self.base.conf.strict:
             raise e
Example #14
    def upgrade(self, module_specs):
        # :api
        no_match_specs = []
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        fail_safe_repo_used = False

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            update_module_list = [x for x in module_list
                                  if self.base._moduleContainer.isModuleActive(x.getId())]
            if not update_module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                continue
            module_dict = self._create_module_dict_and_enable(update_module_list, False)
            upgrade_package_set = set()
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    upgrade_package_set.update(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap))
                    latest_module = self._get_latest(module_list_from_dict)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Upgrading module '{0}' from Fail-Safe repository {1} is not allowed")
                        logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        profiles_set = latest_module.getProfiles(nsvcap.profile)
                        if not profiles_set:
                            continue
                        for profile in profiles_set:
                            upgrade_package_set.update(profile.getContent())
                    else:
                        for profile in latest_module.getProfiles():
                            upgrade_package_set.update(profile.getContent())
                        for artefact in latest_module.getArtifacts():
                            subj = hawkey.Subject(artefact)
                            for nevra_obj in subj.get_nevra_possibilities(
                                    forms=[hawkey.FORM_NEVRA]):
                                upgrade_package_set.add(nevra_obj.name)

            if not upgrade_package_set:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            query = self.base.sack.query().filterm(name=upgrade_package_set)
            if query:
                sltr = dnf.selector.Selector(self.base.sack)
                sltr.set(pkg=query)
                self.base._goal.upgrade(select=sltr)
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(_(
                "Upgrading module from Fail-Safe repository is not allowed"))
        return no_match_specs
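upgrade() is likewise marked `# :api`; it only adds packages to the goal and reports unmatched specs, so the caller still resolves and runs the transaction. A short usage sketch reusing the base/module_base objects from the earlier remove() sketch; the spec is a placeholder:

skipped = module_base.upgrade(['nodejs:10'])
for spec in skipped:
    print('no match for spec:', spec)

base.resolve()
base.download_packages(base.transaction.install_set)
base.do_transaction()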
Example #15
 def run_on_module(self):
     try:
         self.module_base.disable(self.opts.module_spec)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs or e.error_group_specs:
                 raise e
             if e.module_depsolv_errors and e.module_depsolv_errors[1] != \
                     libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS:
                 raise e
         logger.error(str(e))
Example #16
 def run_on_module(self):
     try:
         self.module_base.disable(self.opts.module_spec)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs or e.error_group_specs:
                 raise e
             if e.module_depsolv_errors and e.module_depsolv_errors[1] != \
                     libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS:
                 raise e
         logger.error(str(e))
Example #17
    def upgrade(self, module_specs):
        no_match_specs = []

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            update_module_list = [
                x for x in module_list
                if self.base._moduleContainer.isModuleActive(x.getId())
            ]
            if not update_module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                continue
            module_dict = self._create_module_dict_and_enable(
                update_module_list, False)
            upgrade_package_set = set()
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    upgrade_package_set.update(
                        self._get_package_name_set_and_remove_profiles(
                            module_list_from_dict, nsvcap))
                    latest_module = self._get_latest(module_list_from_dict)
                    installed_profiles_strings = set(
                        self.base._moduleContainer.getInstalledProfiles(
                            latest_module.getName()))
                    if not installed_profiles_strings:
                        continue
                    if nsvcap.profile:
                        profiles_set = latest_module.getProfiles(
                            nsvcap.profile)
                        if not profiles_set:
                            continue
                        for profile in profiles_set:
                            if profile.getName() in installed_profiles_strings:
                                upgrade_package_set.update(
                                    profile.getContent())
                    else:
                        for profile_string in installed_profiles_strings:
                            for profile in latest_module.getProfiles(
                                    profile_string):
                                upgrade_package_set.update(
                                    profile.getContent())
            if not upgrade_package_set:
                logger.error(
                    _("Unable to match profile in argument {}").format(spec))
            query = self.base.sack.query().available().filterm(
                name=upgrade_package_set)
            if query:
                sltr = dnf.selector.Selector(self.base.sack)
                sltr.set(pkg=query)
                self.base._goal.upgrade(select=sltr)
        return no_match_specs
Example #18
    def upgrade(self, module_specs):
        no_match_specs = []

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            update_module_list = [
                x for x in module_list
                if self.base._moduleContainer.isModuleActive(x.getId())
            ]
            if not update_module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                continue
            module_dict = self._create_module_dict_and_enable(
                update_module_list, False)
            upgrade_package_set = set()
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    upgrade_package_set.update(
                        self._get_package_name_set_and_remove_profiles(
                            module_list_from_dict, nsvcap))
                    latest_module = self._get_latest(module_list_from_dict)
                    if nsvcap.profile:
                        profiles_set = latest_module.getProfiles(
                            nsvcap.profile)
                        if not profiles_set:
                            continue
                        for profile in profiles_set:
                            upgrade_package_set.update(profile.getContent())
                    else:
                        for profile in latest_module.getProfiles():
                            upgrade_package_set.update(profile.getContent())
                        for artefact in latest_module.getArtifacts():
                            subj = hawkey.Subject(artefact)
                            for nevra_obj in subj.get_nevra_possibilities(
                                    forms=[hawkey.FORM_NEVRA]):
                                upgrade_package_set.add(nevra_obj.name)

            if not upgrade_package_set:
                logger.error(
                    _("Unable to match profile in argument {}").format(spec))
            query = self.base.sack.query().available().filterm(
                name=upgrade_package_set)
            if query:
                sltr = dnf.selector.Selector(self.base.sack)
                sltr.set(pkg=query)
                self.base._goal.upgrade(select=sltr)
        return no_match_specs
Example #19
 def _enable_dependencies(self, module_dicts):
     error_spec = []
     for spec, (nsvcap, moduleDict) in module_dicts.items():
         for streamDict in moduleDict.values():
             for modules in streamDict.values():
                 try:
                     self.base._moduleContainer.enableDependencyTree(
                         libdnf.module.VectorModulePackagePtr(modules))
                 except RuntimeError as e:
                     error_spec.append(spec)
                     logger.error(ucd(e))
                     logger.error(
                         _("Unable to resolve argument {}").format(spec))
     return error_spec
 def _resolve_specs_enable_update_sack(self, module_specs):
     no_match_specs = []
     error_spec = []
     module_dicts = {}
     for spec in module_specs:
         module_list, nsvcap = self._get_modules(spec)
         if not module_list:
             no_match_specs.append(spec)
             continue
         try:
             module_dict = self._create_module_dict_and_enable(module_list, True)
             module_dicts[spec] = (nsvcap, module_dict)
         except (RuntimeError, EnableMultipleStreamsException) as e:
             error_spec.append(spec)
             logger.error(ucd(e))
             logger.error(_("Unable to resolve argument {}").format(spec))
     hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
     try:
         solver_errors = self.base.sack.filter_modules(
             self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot,
             self.base.conf.module_platform_id,
             self.base.conf.debug_solver)
     except hawkey.Exception as e:
         raise dnf.exceptions.Error(ucd(e))
     for spec, (nsvcap, moduleDict) in module_dicts.items():
         for streamDict in moduleDict.values():
             for modules in streamDict.values():
                 try:
                     self.base._moduleContainer.enableDependencyTree(
                         libdnf.module.VectorModulePackagePtr(modules))
                 except RuntimeError as e:
                     error_spec.append(spec)
                     logger.error(ucd(e))
                     logger.error(_("Unable to resolve argument {}").format(spec))
     return no_match_specs, error_spec, solver_errors, module_dicts
Example #21
 def run_on_module(self):
     try:
         self.module_base.install(self.opts.module_spec, self.base.conf.strict)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs or e.error_group_specs:
                 raise e
         logger.error(str(e))
     switchedModules = dict(self.base._moduleContainer.getSwitchedStreams())
     if switchedModules:
         report_module_switch(switchedModules)
         msg = _("It is not possible to switch enabled streams of a module.\n"
                 "It is recommended to remove all installed content from the module, and "
                 "reset the module using 'dnf module reset <module_name>' command. After "
                 "you reset the module, you can install the other stream.")
         raise dnf.exceptions.Error(msg)
Example #22
 def run_on_module(self):
     try:
         self.module_base.install(self.opts.module_spec, self.base.conf.strict)
     except dnf.exceptions.MarkingErrors as e:
         if self.base.conf.strict:
             if e.no_match_group_specs or e.error_group_specs:
                 raise e
         logger.error(str(e))
     switchedModules = dict(self.base._moduleContainer.getSwitchedStreams())
     if switchedModules:
         report_module_switch(switchedModules)
         msg = _("It is not possible to switch enabled streams of a module.\n"
                 "It is recommended to remove all installed content from the module, and "
                 "reset the module using 'dnf module reset <module_name>' command. After "
                 "you reset the module, you can install the other stream.")
         raise dnf.exceptions.Error(msg)
Example #23
 def _resolve_specs_enable(self, module_specs):
     no_match_specs = []
     error_spec = []
     module_dicts = {}
     for spec in module_specs:
         module_list, nsvcap = self._get_modules(spec)
         if not module_list:
             no_match_specs.append(spec)
             continue
         try:
             module_dict = self._create_module_dict_and_enable(
                 module_list, spec, True)
             module_dicts[spec] = (nsvcap, module_dict)
         except (RuntimeError, EnableMultipleStreamsException) as e:
             error_spec.append(spec)
             logger.error(ucd(e))
             logger.error(_("Unable to resolve argument {}").format(spec))
     return no_match_specs, error_spec, module_dicts
    def upgrade(self, module_specs):
        no_match_specs = []

        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                no_match_specs.append(spec)
                continue
            update_module_list = [x for x in module_list
                                  if self.base._moduleContainer.isModuleActive(x.getId())]
            if not update_module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                continue
            module_dict = self._create_module_dict_and_enable(update_module_list, False)
            upgrade_package_set = set()
            for name, streamdict in module_dict.items():
                for stream, module_list_from_dict in streamdict.items():
                    upgrade_package_set.update(self._get_package_name_set_and_remove_profiles(
                        module_list_from_dict, nsvcap))
                    latest_module = self._get_latest(module_list_from_dict)
                    if nsvcap.profile:
                        profiles_set = latest_module.getProfiles(nsvcap.profile)
                        if not profiles_set:
                            continue
                        for profile in profiles_set:
                            upgrade_package_set.update(profile.getContent())
                    else:
                        for profile in latest_module.getProfiles():
                            upgrade_package_set.update(profile.getContent())
                        for artefact in latest_module.getArtifacts():
                            subj = hawkey.Subject(artefact)
                            for nevra_obj in subj.get_nevra_possibilities(
                                    forms=[hawkey.FORM_NEVRA]):
                                upgrade_package_set.add(nevra_obj.name)

            if not upgrade_package_set:
                logger.error(_("Unable to match profile in argument {}").format(spec))
            query = self.base.sack.query().available().filterm(name=upgrade_package_set)
            if query:
                sltr = dnf.selector.Selector(self.base.sack)
                sltr.set(pkg=query)
                self.base._goal.upgrade(select=sltr)
        return no_match_specs
Example #25
 def _resolve_specs_enable_update_sack(self, module_specs):
     no_match_specs = []
     error_spec = []
     module_dicts = {}
     for spec in set(module_specs):
         module_list, nsvcap = self._get_modules(spec)
         if not module_list:
             no_match_specs.append(spec)
             continue
         try:
             module_dict = self._create_module_dict_and_enable(
                 module_list, True)
             module_dicts[spec] = (nsvcap, module_dict)
         except (RuntimeError, EnableMultipleStreamsException) as e:
             error_spec.append(spec)
             logger.error(ucd(e))
             logger.error(_("Unable to resolve argument {}").format(spec))
     hot_fix_repos = [
         i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes
     ]
     self.base.sack.filter_modules(self.base._moduleContainer,
                                   hot_fix_repos,
                                   self.base.conf.installroot, None)
     return no_match_specs, error_spec, module_dicts
Example #26
    def _modules_reset_or_disable(self, module_specs, to_state):
        no_match_specs = []
        for spec in module_specs:
            module_list, nsvcap = self._get_modules(spec)
            if not module_list:
                logger.error(_("Unable to resolve argument {}").format(spec))
                no_match_specs.append(spec)
                continue
            if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch or nsvcap.profile:
                logger.info(
                    _("Only module name is required. "
                      "Ignoring unneeded information in argument: '{}'").
                    format(spec))
            module_names = set()
            for module in module_list:
                module_names.add(module.getName())
            for name in module_names:
                if to_state == STATE_UNKNOWN:
                    self.base._moduleContainer.reset(name)
                if to_state == STATE_DISABLED:
                    self.base._moduleContainer.disable(name)

        solver_errors = self._update_sack()
        return no_match_specs, solver_errors
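_update_sack() is called here but not shown in this collection. Judging from the inline filter_modules() calls in Examples #5 and #8, it most likely wraps the same call; the following is a reconstruction under that assumption, not a verified copy of the upstream helper:

    def _update_sack(self):
        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        try:
            solver_errors = self.base.sack.filter_modules(
                self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot,
                self.base.conf.module_platform_id, update_only=True,
                debugsolver=self.base.conf.debug_solver)
        except hawkey.Exception as e:
            raise dnf.exceptions.Error(ucd(e))
        return solver_errors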
Example #27
def transformSwdb(input_dir, output_file):
    yumdb_path = os.path.join(input_dir, 'yumdb')
    history_path = os.path.join(input_dir, 'history')
    groups_path = os.path.join(input_dir, 'groups.json')

    state_dict = {}
    repo_dict = {}

    # create binding with STATE_TYPE - returns ID
    def bind_state(cursor, desc):
        code = state_dict.get(desc)
        if code:
            return code
        cursor.execute('SELECT state FROM STATE_TYPE WHERE description=?', (desc, ))
        state_id = cursor.fetchone()
        if state_id is None:
            cursor.execute('INSERT INTO STATE_TYPE VALUES(null,?)', (desc, ))
            cursor.execute('SELECT last_insert_rowid()')
            state_id = cursor.fetchone()
        state_dict[desc] = state_id[0]
        return state_id[0]

    # create binding with repo - returns R_ID
    def bind_repo(cursor, name):
        code = repo_dict.get(name)
        if code:
            return code
        cursor.execute('SELECT R_ID FROM REPO WHERE name=?', (name, ))
        rid = cursor.fetchone()
        if rid is None:
            cursor.execute('INSERT INTO REPO VALUES(null,?)', (name, ))
            cursor.execute('SELECT last_insert_rowid()')
            rid = cursor.fetchone()
        repo_dict[name] = rid[0]
        return rid[0]

    # check path to yumdb dir
    if not os.path.isdir(yumdb_path):
        logger.error(_('Error: yumdb directory not valid'))
        return False

    # check path to history dir
    if not os.path.isdir(history_path):
        logger.error(_('Error: history directory not valid'))
        return False

    # check historyDB file and pick newest one
    historydb_file = glob.glob(os.path.join(history_path, "history*"))
    if len(historydb_file) < 1:
        logger.error(_('Error: history database file not valid'))
        return False
    historydb_file.sort()
    historydb_file = historydb_file[0]

    if not os.path.isfile(historydb_file):
        logger.error(_('Error: history database file not valid'))
        return False

    tmp_output_file = output_file + '.transform'
    try:
        # initialise historyDB
        historyDB = sqlite3.connect(historydb_file)
        h_cursor = historyDB.cursor()
    except sqlite3.Error:
        logger.error(_("ERROR: unable to open the database '{}'").format(historydb_file))
        return False

    try:
        # initialise output DB
        os.rename(output_file, tmp_output_file)
        database = sqlite3.connect(tmp_output_file)
        cursor = database.cursor()
    except (OSError, sqlite3.Error):
        logger.error(_("ERROR: unable to create the database '{}'").format(tmp_output_file))
        return False

    # value distribution in tables
    PACKAGE_DATA = ['P_ID', 'R_ID', 'from_repo_revision', 'from_repo_timestamp',
                    'installed_by', 'changed_by']

    TRANS_DATA = ['T_ID', 'PD_ID', 'TG_ID', 'done', 'obsoleting', 'reason', 'state']

    GROUPS = ['name_id', 'name', 'ui_name', 'installed', 'pkg_types']

    ENVIRONMENTS = ['name_id', 'name', 'ui_name', 'pkg_types', 'grp_types']

    logger.info(_("Transforming the software database. It may take some time."))

    # construction of PACKAGE from pkgtups
    h_cursor.execute('SELECT * FROM pkgtups')
    for row in h_cursor:
        record_P = [
            row[0],  # P_ID
            row[1],  # name
            row[3],  # epoch
            row[4],  # version
            row[5],  # release
            row[2]  # arch
        ]
        if row[6]:
            checksum_type, checksum_data = row[6].split(":", 2)
            record_P.append(checksum_data)
            record_P.append(checksum_type)
        else:
            record_P += ['', '']
        record_P.append(SwdbItem.RPM)  # type
        cursor.execute('INSERT INTO PACKAGE VALUES (?,?,?,?,?,?,?,?,?)', record_P)

    # save changes
    database.commit()

    # construction of PACKAGE_DATA according to pkg_yumdb
    actualPID = 0
    record_PD = [''] * len(PACKAGE_DATA)
    h_cursor.execute('SELECT * FROM pkg_yumdb')

    # for each row in pkg_yumdb
    for row in h_cursor:
        newPID = row[0]
        if actualPID != newPID:
            if actualPID != 0:
                record_PD[0] = actualPID
                # insert new record into PACKAGE_DATA
                PACKAGE_DATA_INSERT(cursor, record_PD)

            actualPID = newPID
            record_PD = [''] * len(PACKAGE_DATA)

        if row[1] in PACKAGE_DATA:
            # collect data for record from pkg_yumdb
            record_PD[PACKAGE_DATA.index(row[1])] = row[2]

        elif row[1] == "from_repo":
            # create binding with REPO table
            record_PD[1] = bind_repo(cursor, row[2])

    record_PD[0] = actualPID
    PACKAGE_DATA_INSERT(cursor, record_PD)  # insert last record

    # save changes
    database.commit()

    # prepare pid to pdid dictionary
    cursor.execute("SELECT PD_ID, P_ID FROM PACKAGE_DATA")
    pid_to_pdid = {}
    for row in cursor:
        pid_to_pdid[row[1]] = row[0]

    obsoleting_pkgs = []

    # trans_data construction
    h_cursor.execute('SELECT tid, pkgtupid, done, state FROM trans_data_pkgs')
    for row in h_cursor:
        state = row[3]
        pid = int(row[1])
        tid = int(row[0])

        # handle Obsoleting packages - save them separately and flag them later
        if state == 'Obsoleting':
            obsoleting_pkgs.append((tid, pid))
            continue

        data = [''] * len(TRANS_DATA)
        pdid = pid_to_pdid.get(pid, 0)

        if not pdid:
            # create new entry
            cursor.execute("INSERT INTO PACKAGE_DATA VALUES (null,?,'','','','','')", (pid,))
            cursor.execute('SELECT last_insert_rowid()')
            pdid = cursor.fetchone()[0]
        else:
            # use this entry and delete it from the DB
            del pid_to_pdid[pid]

        # insert trans_data record
        data[TRANS_DATA.index('state')] = bind_state(cursor, state)
        data[TRANS_DATA.index('PD_ID')] = pdid
        data[TRANS_DATA.index('done')] = 1 if row[2] == 'TRUE' else 0
        data[0] = row[0]
        cursor.execute('INSERT INTO TRANS_DATA VALUES (null,?,?,?,?,?,?,?)', data)

    update_cmd = """UPDATE TRANS_DATA SET obsoleting=1 WHERE TD_ID IN (
                        SELECT TD_ID FROM PACKAGE_DATA JOIN TRANS_DATA using (PD_ID)
                            WHERE T_ID=? and P_ID=?)"""

    # set flag for Obsoleting PD_IDs
    for keys in obsoleting_pkgs:
        cursor.execute(update_cmd, keys)

    # save changes
    database.commit()

    trans_cmd = """SELECT tid, trans_beg.timestamp, trans_end.timestamp, trans_beg.rpmdb_version,
                trans_end.rpmdb_version, cmdline, loginuid, null, return_code
                FROM trans_beg join trans_end using(tid) join trans_cmdline using(tid)"""

    # Construction of TRANS
    h_cursor.execute(trans_cmd)
    for row in h_cursor:
        # releasever is unknown here: drop the null placeholder, store '' and fill it in below
        r = list(row)
        del r[7]
        cursor.execute("INSERT INTO TRANS VALUES (?,?,?,?,?,?,?,'',?)", r)

    # get releasever for transactions
    cursor.execute('SELECT T_ID FROM TRANS WHERE releasever=?', ('', ))
    missing = cursor.fetchall()
    for row in missing:
        tid = row[0]
        cmd = "SELECT P_ID FROM TRANS_DATA join PACKAGE_DATA using (PD_ID) WHERE T_ID=? LIMIT 1"
        cursor.execute(cmd, (tid,))
        pids = cursor.fetchall()
        for pid in pids:
            h_cursor.execute("""SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=? AND
                             yumdb_key='releasever' LIMIT 1""", pid)
            rlsver = h_cursor.fetchone()
            if rlsver:
                cursor.execute("UPDATE TRANS SET releasever=? WHERE T_ID=?", (rlsver[0], tid))
                break

    # collect reasons
    cursor.execute("""SELECT TD_ID, P_ID FROM TRANS_DATA join PACKAGE_DATA using(PD_ID)
                   join PACKAGE using(P_ID)""")
    missing = cursor.fetchall()
    for row in missing:
        h_cursor.execute("""SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=? AND yumdb_key='reason'
                         LIMIT 1""", (row[1],))
        reason = h_cursor.fetchone()
        if reason:
            t_reason = convert_reason(reason[0])
            cursor.execute('UPDATE TRANS_DATA SET reason=? WHERE TD_ID=?', (t_reason, row[0]))

    # fetch additional data from yumdb
    get_yumdb_packages(cursor, yumdb_path, bind_repo)

    # construction of OUTPUT
    h_cursor.execute('SELECT * FROM trans_script_stdout')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stdout')))

    h_cursor.execute('SELECT * FROM trans_error')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stderr')))

    # construction of GROUPS
    if os.path.isfile(groups_path):
        with open(groups_path) as groups_file:
            data = json.load(groups_file)
            for key in data:
                if key == 'GROUPS':
                    for value in data[key]:
                        record_G = [''] * len(GROUPS)
                        record_G[GROUPS.index('name_id')] = value

                        if 'name' in data[key][value]:
                            record_G[GROUPS.index('name')] = data[key][value]['name']

                        record_G[GROUPS.index('pkg_types')] = data[key][value]['pkg_types']

                        record_G[GROUPS.index('installed')] = True
                        if 'ui_name' in data[key][value]:
                            record_G[GROUPS.index('ui_name')] = data[key][value]['ui_name']

                        cursor.execute('''INSERT INTO GROUPS
                                       VALUES (null,?,?,?,?,?)''',
                                       (record_G))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_gid = cursor.fetchone()[0]
                        for package in data[key][value]['full_list']:
                            ADD_GROUPS_PACKAGE(cursor, tmp_gid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_GROUPS_EXCLUDE(cursor, tmp_gid, package)
            for key in data:

                if key == 'ENVIRONMENTS':
                    for value in data[key]:
                        record_E = [''] * len(ENVIRONMENTS)
                        record_E[ENVIRONMENTS.index('name_id')] = value
                        if 'name' in data[key][value]:
                            record_E[ENVIRONMENTS.index('name')] = data[key][value]['name']
                        record_E[ENVIRONMENTS.index('grp_types')] = data[key][value]['grp_types']
                        record_E[ENVIRONMENTS.index('pkg_types')] = data[key][value]['pkg_types']
                        if 'ui_name' in data[key][value]:
                            record_E[ENVIRONMENTS.index('ui_name')] = data[key][value]['ui_name']

                        cursor.execute('''INSERT INTO ENVIRONMENTS
                                       VALUES (null,?,?,?,?,?)''',
                                       (record_E))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_eid = cursor.fetchone()[0]

                        for package in data[key][value]['full_list']:
                            BIND_ENV_GROUP(cursor, tmp_eid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_ENV_EXCLUDE(cursor, tmp_eid, package)

    # construction of TRANS_GROUP_DATA from GROUPS
    cursor.execute('SELECT * FROM GROUPS')
    tmp_groups = cursor.fetchall()
    for row in tmp_groups:
        command = []
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " + " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for single_trans in tmp_trans:
                    data = (single_trans[0], row[0], row[1], row[2], row[3], row[4], row[5])
                    cursor.execute("INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)", data)

    # construction of TRANS_GROUP_DATA from ENVIRONMENTS
    cursor.execute('SELECT * FROM ENVIRONMENTS WHERE ui_name!=?', ('', ))
    tmp_env = cursor.fetchall()
    for row in tmp_env:
        command = []
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " + " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for trans in tmp_trans:
                    cursor.execute("SELECT G_ID FROM ENVIRONMENTS_GROUPS WHERE E_ID=?", (row[0],))
                    tmp_groups = cursor.fetchall()
                    for gid in tmp_groups:
                        cursor.execute("SELECT * FROM GROUPS WHERE G_ID=?", (gid[0],))
                        data = cursor.fetchone()
                        tgdata = (trans[0], data[0], data[1], data[2], data[3], data[4], data[5])
                        cursor.execute("INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)",
                                       tgdata)

    # record the packages each transaction was performed with (trans_with_pkgs)
    h_cursor.execute('SELECT tid, pkgtupid FROM trans_with_pkgs')
    for row in h_cursor:
        cursor.execute('INSERT INTO TRANS_WITH VALUES (null,?,?)', row)

    # save changes
    database.commit()

    # close connection
    database.close()
    historyDB.close()

    # successful
    os.rename(tmp_output_file, output_file)

    return True
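transformSwdb() converts the legacy yum history/yumdb data into the new SWDB schema in place: it renames the target database to a '.transform' temporary, fills it, and renames it back on success. A hedged invocation sketch; both paths are illustrative and the output file is expected to already contain the empty SWDB schema:

input_dir = '/var/lib/yum'                        # illustrative: contains yumdb/ and history/ (groups.json optional)
output_file = '/var/lib/dnf/history/swdb.sqlite'  # illustrative: pre-created, empty SWDB schema

if transformSwdb(input_dir, output_file):
    print('software database transformed successfully')
else:
    print('transformation failed, see the log for details')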
Example #28
 def transform(self, input_dir):
     """ Interface for database transformation """
     if not self._swdb:
         self._initSwdb(input_dir)
     else:
         logger.error(_('Error: database is already initialized'))
Example #29
    def install(self, module_specs, strict=True):
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        # <package_name, set_of_spec>
        install_dict = {}
        install_set_artefacts = set()
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    install_module_list = [
                        x for x in module_list
                        if self.base._moduleContainer.isModuleActive(x.getId())
                    ]
                    if not install_module_list:
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if nsvcap.profile:
                        profiles.extend(
                            latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            logger.error(
                                _("Unable to match profile in argument {}").
                                format(spec))
                            no_match_specs.append(spec)
                            continue
                    else:
                        profiles_strings = self.base._moduleContainer.getDefaultProfiles(
                            name, stream)
                        if not profiles_strings:
                            logger.error(
                                _("No default profiles for module {}:{}").
                                format(name, stream))
                        for profile in set(profiles_strings):
                            module_profiles = latest_module.getProfiles(
                                profile)
                            if not module_profiles:
                                logger.error(
                                    _("Profile {} not matched for module {}:{}"
                                      ).format(profile, name, stream))

                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(
                            latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        install_set_artefacts.update(module.getArtifacts())
        install_base_query = self.base.sack.query().filterm(
            nevra_strict=install_set_artefacts).apply()

        # add hot-fix packages
        hot_fix_repos = [
            i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes
        ]
        hotfix_packages = self.base.sack.query().filterm(
            reponame=hot_fix_repos).filterm(name=install_dict.keys())
        install_base_query = install_base_query.union(hotfix_packages)

        for pkg_name, set_specs in install_dict.items():
            query = install_base_query.filter(name=pkg_name)
            if not query:
                # package can also be non-modular or part of another stream
                query = self.base.sack.query().filterm(name=pkg_name)
                if not query:
                    for spec in set_specs:
                        logger.error(
                            _("Unable to resolve argument {}").format(spec))
                    logger.error(_("No match for package {}").format(pkg_name))
                    error_specs.extend(set_specs)
                    continue
            self.base._goal.group_members.add(pkg_name)
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(pkg=query)
            self.base._goal.install(select=sltr, optional=(not strict))
        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(
                no_match_group_specs=no_match_specs,
                error_group_specs=error_specs,
                module_depsolv_errors=solver_errors)
Example #30
    def install(self, module_specs, strict=True):
        # :api
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        # <package_name, set_of_spec>
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        install_dict = {}
        install_set_artifacts = set()
        fail_safe_repo_used = False
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    install_module_list = [
                        x for x in module_list
                        if self.base._moduleContainer.isModuleActive(x.getId())
                    ]
                    if not install_module_list:
                        logger.error(
                            _("All matches for argument '{0}' in module '{1}:{2}' are not "
                              "active").format(spec, name, stream))
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Installing module '{0}' from Fail-Safe repository {1} is not allowed"
                        )
                        logger.critical(
                            msg.format(latest_module.getNameStream(),
                                       fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        profiles.extend(
                            latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(
                                    sorted([
                                        profile.getName()
                                        for profile in available_profiles
                                    ]))
                                msg = _(
                                    "Unable to match profile for argument {}. Available "
                                    "profiles for '{}:{}': {}").format(
                                        spec, name, stream, profile_names)
                            else:
                                msg = _(
                                    "Unable to match profile for argument {}"
                                ).format(spec)
                            logger.error(msg)
                            no_match_specs.append(spec)
                            continue
                    else:
                        profiles_strings = self.base._moduleContainer.getDefaultProfiles(
                            name, stream)
                        if not profiles_strings:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(
                                    sorted([
                                        profile.getName()
                                        for profile in available_profiles
                                    ]))
                                msg = _(
                                    "No default profiles for module {}:{}. Available profiles"
                                    ": {}").format(name, stream, profile_names)
                            else:
                                msg = _("No profiles for module {}:{}").format(
                                    name, stream)
                            logger.error(msg)
                            error_specs.append(spec)
                        for profile in set(profiles_strings):
                            module_profiles = latest_module.getProfiles(
                                profile)
                            if not module_profiles:
                                logger.error(
                                    _("Default profile {} not available in module {}:{}"
                                      ).format(profile, name, stream))
                                error_specs.append(spec)

                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(
                            latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        install_set_artifacts.update(module.getArtifacts())
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(
                _("Installing module from Fail-Safe repository is not allowed")
            )
        __, profiles_errors = self._install_profiles_internal(
            install_set_artifacts, install_dict, strict)
        if profiles_errors:
            error_specs.extend(profiles_errors)

        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(
                no_match_group_specs=no_match_specs,
                error_group_specs=error_specs,
                module_depsolv_errors=solver_errors)
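A usage sketch for install() that mirrors the error handling of the run_on_module() wrappers in Examples #21 and #22; base and module_base are the objects from the earlier sketches and the spec is a placeholder:

try:
    module_base.install(['nodejs:10/default'], strict=base.conf.strict)
except dnf.exceptions.MarkingErrors as e:
    if base.conf.strict and (e.no_match_group_specs or e.error_group_specs):
        raise
    print(str(e))

base.resolve()
base.download_packages(base.transaction.install_set)
base.do_transaction()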
Example #31
def transformSwdb(input_dir, output_file):
    yumdb_path = os.path.join(input_dir, 'yumdb')
    history_path = os.path.join(input_dir, 'history')
    groups_path = os.path.join(input_dir, 'groups.json')

    state_dict = {}
    repo_dict = {}

    # create binding with STATE_TYPE - returns ID
    def bind_state(cursor, desc):
        code = state_dict.get(desc)
        if code:
            return code
        cursor.execute('SELECT state FROM STATE_TYPE WHERE description=?',
                       (desc, ))
        state_id = cursor.fetchone()
        if state_id is None:
            cursor.execute('INSERT INTO STATE_TYPE VALUES(null,?)', (desc, ))
            cursor.execute('SELECT last_insert_rowid()')
            state_id = cursor.fetchone()
        state_dict[desc] = state_id[0]
        return state_id[0]

    # create binding with repo - returns R_ID
    def bind_repo(cursor, name):
        code = repo_dict.get(name)
        if code:
            return code
        cursor.execute('SELECT R_ID FROM REPO WHERE name=?', (name, ))
        rid = cursor.fetchone()
        if rid is None:
            cursor.execute('INSERT INTO REPO VALUES(null,?)', (name, ))
            cursor.execute('SELECT last_insert_rowid()')
            rid = cursor.fetchone()
        repo_dict[name] = rid[0]
        return rid[0]

    # check path to yumdb dir
    if not os.path.isdir(yumdb_path):
        logger.error(_('Error: yumdb directory not valid'))
        return False

    # check path to history dir
    if not os.path.isdir(history_path):
        logger.error(_('Error: history directory not valid'))
        return False

    # check historyDB file and pick newest one
    historydb_file = glob.glob(os.path.join(history_path, "history*"))
    if len(historydb_file) < 1:
        logger.error(_('Error: history database file not valid'))
        return False
    historydb_file.sort()
    historydb_file = historydb_file[0]

    if not os.path.isfile(historydb_file):
        logger.error(_('Error: history database file not valid'))
        return False

    tmp_output_file = output_file + '.transform'
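    # the existing output database is renamed to this temporary '.transform'
    # file, filled in below, and renamed back to its original name once the
    # transformation has completed (see the end of the function)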
    try:
        # initialise historyDB
        historyDB = sqlite3.connect(historydb_file)
        h_cursor = historyDB.cursor()
    except:
        logger.error(
            _("ERROR: unable to open the database '{}'").format(
                historydb_file))
        return False

    try:
        # initialise output DB
        os.rename(output_file, tmp_output_file)
        database = sqlite3.connect(tmp_output_file)
        cursor = database.cursor()
    except:
        logger.error(
            _("ERROR: unable to create the database '{}'").format(
                tmp_output_file))
        return False

    # column layouts of the records built for the target tables
    PACKAGE_DATA = [
        'P_ID', 'R_ID', 'from_repo_revision', 'from_repo_timestamp',
        'installed_by', 'changed_by'
    ]

    TRANS_DATA = [
        'T_ID', 'PD_ID', 'TG_ID', 'done', 'obsoleting', 'reason', 'state'
    ]

    GROUPS = ['name_id', 'name', 'ui_name', 'installed', 'pkg_types']

    ENVIRONMENTS = ['name_id', 'name', 'ui_name', 'pkg_types', 'grp_types']

    logger.info(
        _("Transforming the software database. It may take some time."))

    # construction of PACKAGE from pkgtups
    h_cursor.execute('SELECT * FROM pkgtups')
    for row in h_cursor:
        record_P = [
            row[0],  # P_ID
            row[1],  # name
            row[3],  # epoch
            row[4],  # version
            row[5],  # release
            row[2]  # arch
        ]
        if row[6]:
            checksum_type, checksum_data = row[6].split(":", 2)
            record_P.append(checksum_data)
            record_P.append(checksum_type)
        else:
            record_P += ['', '']
        record_P.append(SwdbItem.RPM)  # type
        cursor.execute('INSERT INTO PACKAGE VALUES (?,?,?,?,?,?,?,?,?)',
                       record_P)

    # save changes
    database.commit()

    # construction of PACKAGE_DATA according to pkg_yumdb
    actualPID = 0
    record_PD = [''] * len(PACKAGE_DATA)
    h_cursor.execute('SELECT * FROM pkg_yumdb')

    # for each row in pkg_yumdb
    for row in h_cursor:
        newPID = row[0]
        if actualPID != newPID:
            if actualPID != 0:
                record_PD[0] = actualPID
                # insert new record into PACKAGE_DATA
                PACKAGE_DATA_INSERT(cursor, record_PD)

            actualPID = newPID
            record_PD = [''] * len(PACKAGE_DATA)

        if row[1] in PACKAGE_DATA:
            # collect data for record from pkg_yumdb
            record_PD[PACKAGE_DATA.index(row[1])] = row[2]

        elif row[1] == "from_repo":
            # create binding with REPO table
            record_PD[1] = bind_repo(cursor, row[2])

    record_PD[0] = actualPID
    PACKAGE_DATA_INSERT(cursor, record_PD)  # insert last record
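    # (the loop above only flushes a record when the P_ID changes, so the last
    # package's data has to be written out separately here)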

    # save changes
    database.commit()

    # prepare pid to pdid dictionary
    cursor.execute("SELECT PD_ID, P_ID FROM PACKAGE_DATA")
    pid_to_pdid = {}
    for row in cursor:
        pid_to_pdid[row[1]] = row[0]

    obsoleting_pkgs = []

    # trans_data construction
    h_cursor.execute('SELECT tid, pkgtupid, done, state FROM trans_data_pkgs')
    for row in h_cursor:
        state = row[3]
        pid = int(row[1])
        tid = int(row[0])

        # handle Obsoleting packages - save it as separate attribute
        if state == 'Obsoleting':
            obsoleting_pkgs.append((tid, pid))
            continue

        data = [''] * len(TRANS_DATA)
        pdid = pid_to_pdid.get(pid, 0)

        if not pdid:
            # create new entry
            cursor.execute(
                "INSERT INTO PACKAGE_DATA VALUES (null,?,'','','','','')",
                (pid, ))
            cursor.execute('SELECT last_insert_rowid()')
            pdid = cursor.fetchone()[0]
        else:
            # use this entry and delete it from the DB
            del pid_to_pdid[pid]

        # insert trans_data record
        data[TRANS_DATA.index('state')] = bind_state(cursor, state)
        data[TRANS_DATA.index('PD_ID')] = pdid
        data[TRANS_DATA.index('done')] = 1 if row[2] == 'TRUE' else 0
        data[0] = row[0]
        cursor.execute('INSERT INTO TRANS_DATA VALUES (null,?,?,?,?,?,?,?)',
                       data)

    update_cmd = """UPDATE TRANS_DATA SET obsoleting=1 WHERE TD_ID IN (
                        SELECT TD_ID FROM PACKAGE_DATA JOIN TRANS_DATA using (PD_ID)
                            WHERE T_ID=? and P_ID=?)"""

    # set flag for Obsoleting PD_IDs
    for keys in obsoleting_pkgs:
        cursor.execute(update_cmd, keys)

    # save changes
    database.commit()

    trans_cmd = """SELECT tid, trans_beg.timestamp, trans_end.timestamp, trans_beg.rpmdb_version,
                trans_end.rpmdb_version, cmdline, loginuid, null, return_code
                FROM trans_beg join trans_end using(tid) join trans_cmdline using(tid)"""

    # Construction of TRANS
    h_cursor.execute(trans_cmd)
    for row in h_cursor:
        # drop the null placeholder; releasever is stored as '' for now and
        # filled in from pkg_yumdb below
        r = list(row)
        del r[7]
        cursor.execute("INSERT INTO TRANS VALUES (?,?,?,?,?,?,?,'',?)", r)

    # get releasever for transactions
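    # (the value is taken from the first package in each transaction that has
    # a 'releasever' key in pkg_yumdb)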
    cursor.execute('SELECT T_ID FROM TRANS WHERE releasever=?', ('', ))
    missing = cursor.fetchall()
    for row in missing:
        tid = row[0]
        cmd = "SELECT P_ID FROM TRANS_DATA join PACKAGE_DATA using (PD_ID) WHERE T_ID=? LIMIT 1"
        cursor.execute(cmd, (tid, ))
        pids = cursor.fetchall()
        for pid in pids:
            h_cursor.execute(
                """SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=? AND
                             yumdb_key='releasever' LIMIT 1""", pid)
            rlsver = h_cursor.fetchone()
            if rlsver:
                cursor.execute("UPDATE TRANS SET releasever=? WHERE T_ID=?",
                               (rlsver[0], tid))
                break

    # collect reasons
    cursor.execute(
        """SELECT TD_ID, P_ID FROM TRANS_DATA join PACKAGE_DATA using(PD_ID)
                   join PACKAGE using(P_ID)""")
    missing = cursor.fetchall()
    for row in missing:
        h_cursor.execute(
            """SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=? AND yumdb_key='reason'
                         LIMIT 1""", (row[1], ))
        reason = h_cursor.fetchone()
        if reason:
            t_reason = convert_reason(reason[0])
            cursor.execute('UPDATE TRANS_DATA SET reason=? WHERE TD_ID=?',
                           (t_reason, row[0]))

    # fetch additional data from yumdb
    get_yumdb_packages(cursor, yumdb_path, bind_repo)

    # construction of OUTPUT
    h_cursor.execute('SELECT * FROM trans_script_stdout')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stdout')))

    h_cursor.execute('SELECT * FROM trans_error')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stderr')))

    # construction of GROUPS
    if os.path.isfile(groups_path):
        with open(groups_path) as groups_file:
            data = json.load(groups_file)
            for key in data:
                if key == 'GROUPS':
                    for value in data[key]:
                        record_G = [''] * len(GROUPS)
                        record_G[GROUPS.index('name_id')] = value

                        if 'name' in data[key][value]:
                            record_G[GROUPS.index(
                                'name')] = data[key][value]['name']

                        record_G[GROUPS.index(
                            'pkg_types')] = data[key][value]['pkg_types']

                        record_G[GROUPS.index('installed')] = True
                        if 'ui_name' in data[key][value]:
                            record_G[GROUPS.index(
                                'ui_name')] = data[key][value]['ui_name']

                        cursor.execute(
                            '''INSERT INTO GROUPS
                                       VALUES (null,?,?,?,?,?)''', (record_G))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_gid = cursor.fetchone()[0]
                        for package in data[key][value]['full_list']:
                            ADD_GROUPS_PACKAGE(cursor, tmp_gid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_GROUPS_EXCLUDE(cursor, tmp_gid, package)
            for key in data:

                if key == 'ENVIRONMENTS':
                    for value in data[key]:
                        record_E = [''] * len(ENVIRONMENTS)
                        record_E[GROUPS.index('name_id')] = value
                        if 'name' in data[key][value]:
                            record_E[ENVIRONMENTS.index(
                                'name')] = data[key][value]['name']
                        record_E[ENVIRONMENTS.index(
                            'grp_types')] = data[key][value]['grp_types']
                        record_E[ENVIRONMENTS.index(
                            'pkg_types')] = data[key][value]['pkg_types']
                        if 'ui_name' in data[key][value]:
                            record_E[ENVIRONMENTS.index(
                                'ui_name')] = data[key][value]['ui_name']

                        cursor.execute(
                            '''INSERT INTO ENVIRONMENTS
                                       VALUES (null,?,?,?,?,?)''', (record_E))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_eid = cursor.fetchone()[0]

                        for package in data[key][value]['full_list']:
                            BIND_ENV_GROUP(cursor, tmp_eid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_ENV_EXCLUDE(cursor, tmp_eid, package)

    # construction of TRANS_GROUP_DATA from GROUPS
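    # (heuristic match: a transaction is linked to a group when the group's
    # name_id, name or ui_name appears in the recorded command line)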
    cursor.execute('SELECT * FROM GROUPS')
    tmp_groups = cursor.fetchall()
    for row in tmp_groups:
        command = []
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                           " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for single_trans in tmp_trans:
                    data = (single_trans[0], row[0], row[1], row[2], row[3],
                            row[4], row[5])
                    cursor.execute(
                        "INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)",
                        data)

    # construction of TRANS_GROUP_DATA from ENVIRONMENTS
    cursor.execute('SELECT * FROM ENVIRONMENTS WHERE ui_name!=?', ('', ))
    tmp_env = cursor.fetchall()
    for row in tmp_env:
        command = []
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                           " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for trans in tmp_trans:
                    cursor.execute(
                        "SELECT G_ID FROM ENVIRONMENTS_GROUPS WHERE E_ID=?",
                        (row[0], ))
                    tmp_groups = cursor.fetchall()
                    for gid in tmp_groups:
                        cursor.execute("SELECT * FROM GROUPS WHERE G_ID=?",
                                       (gid[0], ))
                        data = cursor.fetchone()
                        tgdata = (trans[0], data[0], data[1], data[2], data[3],
                                  data[4], data[5])
                        cursor.execute(
                            "INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)",
                            tgdata)

    # create Transaction performed with package
    h_cursor.execute('SELECT tid, pkgtupid FROM trans_with_pkgs')
    for row in h_cursor:
        cursor.execute('INSERT INTO TRANS_WITH VALUES (null,?,?)', row)

    # save changes
    database.commit()

    # close connection
    database.close()
    historyDB.close()

    # successful
    os.rename(tmp_output_file, output_file)

    return True
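
A minimal, hypothetical invocation of the transformSwdb() function above; both paths and the pre-created output file are assumptions, not part of the example:

# the target SWDB file must already exist, since the function renames it
# before filling it in
if transformSwdb('/var/lib/yum', '/var/lib/dnf/history/swdb.sqlite'):
    print('software database transformed successfully')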
Example #32
0
    def switch_to(self, module_specs, strict=True):
        # :api
        no_match_specs, error_specs, module_dicts = self._resolve_specs_enable(
            module_specs)
        # collect name of artifacts from new modules for distrosync
        new_artifacts_names = set()
        # collect name of artifacts from active modules for distrosync before sack update
        active_artifacts_names = set()
        src_arches = {"nosrc", "src"}
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name in moduledict.keys():
                for module in self.base._moduleContainer.query(
                        name, "", "", "", ""):
                    if self.base._moduleContainer.isModuleActive(module):
                        for artifact in module.getArtifacts():
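                            # artifacts are full NEVRA strings
                            # ("name-epoch:version-release.arch"); source
                            # packages are skipped and only the name is kept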
                            arch = artifact.rsplit(".", 1)[1]
                            if arch in src_arches:
                                continue
                            pkg_name = artifact.rsplit("-", 2)[0]
                            active_artifacts_names.add(pkg_name)

        solver_errors = self._update_sack()

        dependency_error_spec = self._enable_dependencies(module_dicts)
        if dependency_error_spec:
            error_specs.extend(dependency_error_spec)

        # <package_name, set_of_spec>
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        install_dict = {}
        install_set_artifacts = set()
        fail_safe_repo_used = False

        # list of name: [profiles] for module profiles being removed
        removed_profiles = self.base._moduleContainer.getRemovedProfiles()

        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    install_module_list = [
                        x for x in module_list
                        if self.base._moduleContainer.isModuleActive(x.getId())
                    ]
                    if not install_module_list:
                        "No active matches for argument '{0}' in module '{1}:{2}'"
                        logger.error(
                            _("No active matches for argument '{0}' in module "
                              "'{1}:{2}'").format(spec, name, stream))
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Installing module '{0}' from Fail-Safe repository {1} is not allowed"
                        )
                        logger.critical(
                            msg.format(latest_module.getNameStream(),
                                       fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        profiles.extend(
                            latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(
                                    sorted([
                                        profile.getName()
                                        for profile in available_profiles
                                    ]))
                                msg = _(
                                    "Unable to match profile for argument {}. Available "
                                    "profiles for '{}:{}': {}").format(
                                        spec, name, stream, profile_names)
                            else:
                                msg = _(
                                    "Unable to match profile for argument {}"
                                ).format(spec)
                            logger.error(msg)
                            no_match_specs.append(spec)
                            continue
                    elif name in removed_profiles:
                        for profile in removed_profiles[name]:
                            module_profiles = latest_module.getProfiles(
                                profile)
                            if not module_profiles:
                                logger.warning(
                                    _("Installed profile '{0}' is not available in module "
                                      "'{1}' stream '{2}'").format(
                                          profile, name, stream))
                                continue
                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(
                            latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        artifacts = module.getArtifacts()
                        install_set_artifacts.update(artifacts)
                        for artifact in artifacts:
                            arch = artifact.rsplit(".", 1)[1]
                            if arch in src_arches:
                                continue
                            pkg_name = artifact.rsplit("-", 2)[0]
                            new_artifacts_names.add(pkg_name)
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(
                _("Installing module from Fail-Safe repository is not allowed")
            )
        install_base_query, profiles_errors = self._install_profiles_internal(
            install_set_artifacts, install_dict, strict)
        if profiles_errors:
            error_specs.extend(profiles_errors)

        # distrosync module name
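        # (installed packages from the newly selected or the previously active
        # streams are distro-synced; packages with no available counterpart
        # that are not part of the new streams are queued for removal)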
        all_names = set()
        all_names.update(new_artifacts_names)
        all_names.update(active_artifacts_names)
        remove_query = self.base.sack.query().filterm(empty=True)
        base_no_source_query = self.base.sack.query().filterm(
            arch__neq=['src', 'nosrc']).apply()

        for pkg_name in all_names:
            query = base_no_source_query.filter(name=pkg_name)
            installed = query.installed()
            if not installed:
                continue
            available = query.available()
            if not available:
                logger.warning(
                    _("No packages available to distrosync for package name "
                      "'{}'").format(pkg_name))
                if pkg_name not in new_artifacts_names:
                    remove_query = remove_query.union(query)
                continue

            only_new_module = query.intersection(install_base_query)
            if only_new_module:
                query = only_new_module
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(pkg=query)
            self.base._goal.distupgrade(select=sltr)
        self.base._remove_if_unneeded(remove_query)

        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(
                no_match_group_specs=no_match_specs,
                error_group_specs=error_specs,
                module_depsolv_errors=solver_errors)
Example #33
0
    def transform(self, input_dir):
        """ Interface for database transformation """
        if not self._swdb:
            self._initSwdb(input_dir)
        else:
            logger.error(_('Error: database is already initialized'))
Example #34
0
    def install(self, module_specs, strict=True):
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        # <package_name, set_of_spec>
        install_dict = {}
        install_set_artefacts = set()
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    install_module_list = [x for x in module_list
                                           if self.base._moduleContainer.isModuleActive(x.getId())]
                    if not install_module_list:
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if nsvcap.profile:
                        profiles.extend(latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            logger.error(_("Unable to match profile in argument {}").format(spec))
                            no_match_specs.append(spec)
                            continue
                    else:
                        profiles_strings = self.base._moduleContainer.getDefaultProfiles(
                            name, stream)
                        if not profiles_strings:
                            logger.error(_("No default profiles for module {}:{}").format(
                                name, stream))
                        for profile in set(profiles_strings):
                            module_profiles = latest_module.getProfiles(profile)
                            if not module_profiles:
                                logger.error(
                                    _("Profile {} not matched for module {}:{}").format(
                                        profile, name, stream))

                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        install_set_artefacts.update(module.getArtifacts())
        install_base_query = self.base.sack.query().filterm(
            nevra_strict=install_set_artefacts).apply()

        # add hot-fix packages
        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        hotfix_packages = self.base.sack.query().filterm(reponame=hot_fix_repos).filterm(
            name=install_dict.keys())
        install_base_query = install_base_query.union(hotfix_packages)

        for pkg_name, set_specs in install_dict.items():
            query = install_base_query.filter(name=pkg_name)
            if not query:
                # package can also be non-modular or part of another stream
                query = self.base.sack.query().filterm(name=pkg_name)
                if not query:
                    for spec in set_specs:
                        logger.error(_("Unable to resolve argument {}").format(spec))
                    logger.error(_("No match for package {}").format(pkg_name))
                    error_specs.extend(set_specs)
                    continue
            self.base._goal.group_members.add(pkg_name)
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(pkg=query)
            self.base._goal.install(select=sltr, optional=(not strict))
        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               error_group_specs=error_specs,
                                               module_depsolv_errors=solver_errors)
Example #35
0
    def install(self, module_specs, strict=True):
        # :api
        no_match_specs, error_specs, solver_errors, module_dicts = \
            self._resolve_specs_enable_update_sack(module_specs)

        # <package_name, set_of_spec>
        fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME
        install_dict = {}
        install_set_artefacts = set()
        fail_safe_repo_used = False
        for spec, (nsvcap, moduledict) in module_dicts.items():
            for name, streamdict in moduledict.items():
                for stream, module_list in streamdict.items():
                    install_module_list = [x for x in module_list
                                           if self.base._moduleContainer.isModuleActive(x.getId())]
                    if not install_module_list:
                        logger.error(_("All matches for argument '{0}' in module '{1}:{2}' are not "
                                       "active").format(spec, name, stream))
                        error_specs.append(spec)
                        continue
                    profiles = []
                    latest_module = self._get_latest(install_module_list)
                    if latest_module.getRepoID() == fail_safe_repo:
                        msg = _(
                            "Installing module '{0}' from Fail-Safe repository {1} is not allowed")
                        logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo))
                        fail_safe_repo_used = True
                    if nsvcap.profile:
                        profiles.extend(latest_module.getProfiles(nsvcap.profile))
                        if not profiles:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(sorted(
                                    [profile.getName() for profile in available_profiles]))
                                msg = _("Unable to match profile for argument {}. Available "
                                        "profiles for '{}:{}': {}").format(
                                    spec, name, stream, profile_names)
                            else:
                                msg = _("Unable to match profile for argument {}").format(spec)
                            logger.error(msg)
                            no_match_specs.append(spec)
                            continue
                    else:
                        profiles_strings = self.base._moduleContainer.getDefaultProfiles(
                            name, stream)
                        if not profiles_strings:
                            available_profiles = latest_module.getProfiles()
                            if available_profiles:
                                profile_names = ", ".join(sorted(
                                    [profile.getName() for profile in available_profiles]))
                                msg = _("No default profiles for module {}:{}. Available profiles"
                                        ": {}").format(
                                    name, stream, profile_names)
                            else:
                                msg = _("No profiles for module {}:{}").format(name, stream)
                            logger.error(msg)
                            error_specs.append(spec)
                        for profile in set(profiles_strings):
                            module_profiles = latest_module.getProfiles(profile)
                            if not module_profiles:
                                logger.error(
                                    _("Default profile {} not available in module {}:{}").format(
                                        profile, name, stream))
                                error_specs.append(spec)

                            profiles.extend(module_profiles)
                    for profile in profiles:
                        self.base._moduleContainer.install(latest_module, profile.getName())
                        for pkg_name in profile.getContent():
                            install_dict.setdefault(pkg_name, set()).add(spec)
                    for module in install_module_list:
                        install_set_artefacts.update(module.getArtifacts())
        if fail_safe_repo_used:
            raise dnf.exceptions.Error(_(
                "Installing module from Fail-Safe repository is not allowed"))
        install_base_query = self.base.sack.query().filterm(
            nevra_strict=install_set_artefacts).apply()

        # add hot-fix packages
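        # (repositories flagged with module_hotfixes are exempt from modular
        # filtering, so their packages may satisfy a profile even though they
        # are not listed as module artifacts)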
        hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes]
        hotfix_packages = self.base.sack.query().filterm(reponame=hot_fix_repos).filterm(
            name=install_dict.keys())
        install_base_query = install_base_query.union(hotfix_packages)

        for pkg_name, set_specs in install_dict.items():
            query = install_base_query.filter(name=pkg_name)
            if not query:
                # package can also be non-modular or part of another stream
                query = self.base.sack.query().filterm(name=pkg_name)
                if not query:
                    for spec in set_specs:
                        logger.error(_("Unable to resolve argument {}").format(spec))
                    logger.error(_("No match for package {}").format(pkg_name))
                    error_specs.extend(set_specs)
                    continue
            self.base._goal.group_members.add(pkg_name)
            sltr = dnf.selector.Selector(self.base.sack)
            sltr.set(pkg=query)
            self.base._goal.install(select=sltr, optional=(not strict))
        if no_match_specs or error_specs or solver_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs,
                                               error_group_specs=error_specs,
                                               module_depsolv_errors=solver_errors)
Example #36
0
        def run_on_module(self):
            skipped_groups = self.module_base.remove(self.opts.module_spec)
            if not skipped_groups:
                return

            logger.error(dnf.exceptions.MarkingErrors(no_match_group_specs=skipped_groups))