def enable(self, stream, assumeyes=False):
    """Enable *stream* for this module, persisting the choice.

    Raises NoStreamException when the stream does not exist for this module,
    and EnabledStreamException when a different stream is already enabled and
    switching is refused (by the user or by assumeno).
    """
    if stream not in self:
        raise NoStreamException("{}:{}".format(self.name, stream))
    if self.conf.enabled._get() and self.conf.stream._get() == stream:
        # Requested stream already enabled — nothing to do.
        return
    # BUG FIX: original compared with `is not ""` (object identity against a
    # literal) — implementation-dependent and a SyntaxWarning on Python 3.8+;
    # equality is what is meant here.
    if self.conf.stream._get() != "" and \
            str(self.conf.stream._get()) != str(stream) and \
            not assumeyes:
        # A different stream is currently enabled: ask before switching.
        logger.info(module_messages[DIFFERENT_STREAM_INFO].format(
            self.name))
        if not self.parent.base.conf.assumeno and \
                self.parent.base.output.userconfirm():
            # Drop the old stream's pinned version/profiles, then retry with
            # confirmation implied.
            self.parent.base._module_persistor.set_data(self, version=-1,
                                                        profiles=set())
            self.enable(stream, True)
        else:
            raise EnabledStreamException("{}:{}".format(self.name, stream))
    self.parent.base._module_persistor.set_data(self, stream=stream,
                                                enabled=True)
def run_on_module(self):
    """Remove the requested module specs; with --all also erase their RPMs."""
    skipped_groups = self.module_base.remove(self.opts.module_spec)
    if self.opts.all:
        spec_modules = self._get_modules_from_name_stream_specs()
        names_to_remove, __ = self._get_module_artifact_names(
            spec_modules, set())
        names_to_keep, __ = self._get_module_artifact_names(
            self.base._moduleContainer.getModulePackages(), spec_modules)
        removable = self.base.sack.query().installed().filterm(
            name=names_to_remove)
        protected = self.base.sack.query().installed().filterm(
            name=names_to_keep)
        for pkg in removable:
            if pkg in protected:
                # A package shared by several modules must not be erased.
                logger.info(_(
                    "Package {} belongs to multiple modules, skipping"
                ).format(pkg))
                continue
            self.base.goal.erase(
                pkg,
                clean_deps=self.base.conf.clean_requirements_on_remove)
    if skipped_groups:
        logger.error(
            dnf.exceptions.MarkingErrors(
                no_match_group_specs=skipped_groups))
def run_on_module(self):
    """Enable the stream of every requested module, then run the transaction."""
    picked = dict()
    for spec in self.opts.module_spec:
        version, form = ModuleSubject(spec).find_module_version(
            self.base.repo_module_dict)
        if version.name in picked:
            # Two specs resolving to the same module means conflicting streams.
            raise EnableMultipleStreamsException(version.name)
        picked[version.name] = (version, form)
    for version, form in picked.values():
        if form.profile:
            logger.info("Ignoring unnecessary profile: '{}/{}'".format(
                form.name, form.profile))
        self.base.repo_module_dict.enable_by_version(version)
    self.base.do_transaction()
    logger.info(
        _("\nTo switch to the new streams' RPMs, run '{} distro-sync'. \n"
          "Then migrate configuration files and data as necessary.".format(
              os.path.basename(sys.argv[0]))))
def _modules_reset_or_disable(self, module_specs, to_state):
    """Reset or disable the modules named by *module_specs*.

    Returns the list of specs that did not match any module.
    """
    no_match_specs = []
    for spec in module_specs:
        module_list, nsvcap = self._get_modules(spec)
        if not module_list:
            logger.error(_("Unable to resolve argument {}").format(spec))
            no_match_specs.append(spec)
            continue
        if nsvcap.profile:
            logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                nsvcap.name, nsvcap.profile))
        # Apply the state change once per distinct module name.
        for name in {module.getName() for module in module_list}:
            if to_state == STATE_UNKNOWN:
                self.base._moduleContainer.reset(name)
            if to_state == STATE_DISABLED:
                self.base._moduleContainer.disable(name)
    hot_fix_repos = [
        repo.id for repo in self.base.repos.iter_enabled()
        if repo.module_hotfixes
    ]
    self.base.sack.filter_modules(
        self.base._moduleContainer, hot_fix_repos,
        self.base.conf.installroot, self.base.conf.module_platform_id,
        update_only=True)
    return no_match_specs
def _get_info_profiles(self, module_specs):
    """Render one table per resolved module listing its profiles' packages."""
    tables = set()
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(
                _("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            logger.info(
                _("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
        for module in module_list:
            lines = OrderedDict()
            lines["Name"] = module.getFullIdentifier()
            for profile in sorted(module.getProfiles(),
                                  key=_profile_comparison_key):
                lines[profile.getName()] = "\n".join(profile.getContent())
            tables.add(self._create_simple_table(lines).toString())
    return "\n\n".join(sorted(tables))
def _modules_reset_or_disable(self, module_specs, to_state):
    """Reset or disable modules by name.

    Returns (specs that matched nothing, module solver errors).
    """
    no_match_specs = []
    for spec in module_specs:
        module_list, nsvcap = self._get_modules(spec)
        if not module_list:
            logger.error(_("Unable to resolve argument {}").format(spec))
            no_match_specs.append(spec)
            continue
        extra = (nsvcap.stream or nsvcap.version or nsvcap.context
                 or nsvcap.arch or nsvcap.profile)
        if extra:
            logger.info(_("Only module name is required. "
                          "Ignoring unneeded information in argument: '{}'").format(spec))
        # One state change per distinct module name.
        for name in {module.getName() for module in module_list}:
            if to_state == STATE_UNKNOWN:
                self.base._moduleContainer.reset(name)
            if to_state == STATE_DISABLED:
                self.base._moduleContainer.disable(name)
    hot_fix_repos = [
        repo.id for repo in self.base.repos.iter_enabled()
        if repo.module_hotfixes
    ]
    try:
        solver_errors = self.base.sack.filter_modules(
            self.base._moduleContainer, hot_fix_repos,
            self.base.conf.installroot, self.base.conf.module_platform_id,
            update_only=True, debugsolver=self.base.conf.debug_solver)
    except hawkey.Exception as e:
        raise dnf.exceptions.Error(ucd(e))
    return no_match_specs, solver_errors
def print_what_provides(self, rpms):
    """Log, for each RPM name in *rpms*, the module versions that provide it."""
    report = ""
    for version in self.list_module_version_all():
        for nevra in version.artifacts():
            parsed = list(
                Subject(nevra).get_nevra_possibilities(
                    hawkey.FORM_NEVRA))[0]
            if parsed.name not in rpms:
                continue
            providing_profiles = [
                profile for profile in version.profiles
                if parsed.name in version.rpms(profile)
            ]
            table = self.create_simple_table({
                "Module": version.full_version,
                "Profiles": " ".join(providing_profiles),
                "Repo": version.repo.id,
                "Summary": version.summary()
            })
            report += "{}\n".format(self.base.output.term.bold(nevra))
            report += "{}\n\n".format(table.toString())
    # Trim the final blank separator before logging.
    logger.info(report[:-2])
def get_full_info(self, module_spec):
    """Return the raw modulemd metadata dump for *module_spec*."""
    module_version, module_form = ModuleSubject(
        module_spec).find_module_version(self)
    if module_form.profile:
        logger.info("Ignoring unnecessary profile: '{}/{}'".format(
            module_form.name, module_form.profile))
    return module_version.module_metadata.dumps().rstrip("\n")
def _get_info(self, module_specs):
    """Build the "module info" text for the given specs.

    Renders one table per matched module package, deduplicates and sorts
    them, and appends a legend hint when anything was produced.
    """
    output = []
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(
                _("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            # A profile part is meaningless for "info"; warn and continue.
            logger.info(
                _("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
        for modulePackage in module_list:
            # " [d]" marks the distribution default stream.
            default_str = ""
            if modulePackage.getStream(
            ) == self.base._moduleContainer.getDefaultStream(
                    modulePackage.getName()):
                default_str = " [d]"
            # " [e]" (or "[e]" directly after "[d]") marks the enabled stream.
            enabled_str = ""
            if self.base._moduleContainer.isEnabled(modulePackage):
                if not default_str:
                    enabled_str = " "
                enabled_str += "[e]"
            installed_profiles = self.base._moduleContainer.getInstalledProfiles(
                modulePackage.getName())
            available_profiles = modulePackage.getProfiles()
            default_profiles = self.base._moduleContainer.getDefaultProfiles(
                modulePackage.getName(), modulePackage.getStream())
            # Render "name [d] [i], name2, ..." — [d] default, [i] installed.
            profiles_str = ""
            for profile in available_profiles:
                profiles_str += "{}{}".format(
                    profile.getName(),
                    " [d]" if profile.getName() in default_profiles else "")
                # NOTE(review): this membership test compares the profile
                # OBJECT against getInstalledProfiles(); if that call yields
                # profile NAMES, "[i]" can never match — confirm whether
                # profile.getName() is intended here.
                profiles_str += " [i], " if profile in installed_profiles and enabled_str \
                    else ", "
            profiles_str = profiles_str[:-2]  # drop trailing ", "
            lines = OrderedDict()
            lines["Name"] = modulePackage.getName()
            lines["Stream"] = modulePackage.getStream(
            ) + default_str + enabled_str
            lines["Version"] = modulePackage.getVersion()
            lines["Profiles"] = profiles_str
            lines["Default profiles"] = " ".join(default_profiles)
            lines["Repo"] = modulePackage.getRepoID()
            lines["Summary"] = modulePackage.getSummary()
            lines["Description"] = modulePackage.getDescription()
            lines["Artifacts"] = "\n".join(
                sorted(modulePackage.getArtifacts()))
            output.append(self._create_simple_table(lines).toString())
    # set() deduplicates identical tables coming from overlapping specs.
    str_table = "\n\n".join(sorted(set(output)))
    if str_table:
        str_table += "\n\nHint: [d]efault, [e]nabled, [i]nstalled"
    return str_table
def get_info(self, module_spec):
    """Build the "module info" table text for a single module spec."""
    subj = ModuleSubject(module_spec)
    module_version, module_form = subj.find_module_version(self)
    if module_form.profile:
        # A profile part is meaningless for "info"; warn and continue.
        logger.info("Ignoring unnecessary profile: '{}/{}'".format(
            module_form.name, module_form.profile))
    conf = module_version.repo_module.conf
    # " [d]" marks the default stream, "[e]" the currently-enabled one.
    default_stream = module_version.repo_module.defaults.peek_default_stream(
    )
    default_str = " [d]" if module_version.stream == default_stream else ""
    enabled_str = ""
    if module_version.stream == conf.stream._get() and conf.state._get(
    ) == "enabled":
        if not default_str:
            enabled_str = " "
        enabled_str += "[e]"
    # Default profiles come from the defaults of the requested stream, or
    # of the default stream when the spec named none.
    default_profiles = []
    stream = module_form.stream or module_version.repo_module.defaults.peek_default_stream(
    )
    profile_defaults = module_version.repo_module.defaults.peek_profile_defaults(
    )
    if stream in profile_defaults:
        default_profiles.extend(profile_defaults[stream].dup())
    # Render "name [d][i], ..." — [d] default profile, [i] installed profile.
    profiles_str = ""
    available_profiles = module_version.profiles
    installed_profiles = []
    if module_version.stream == conf.stream._get():
        installed_profiles = list(conf.profiles._get())
    for profile in available_profiles:
        profiles_str += "{}{}".format(
            profile, " [d]" if profile in default_profiles else "")
        profiles_str += "[i], " if profile in installed_profiles else ", "
    profiles_str = profiles_str[:-2]  # drop trailing ", "
    lines = OrderedDict()
    lines["Name"] = module_version.name
    lines["Stream"] = module_version.stream + default_str + enabled_str
    lines["Version"] = module_version.version
    lines["Profiles"] = profiles_str
    lines["Default profiles"] = " ".join(default_profiles)
    lines["Repo"] = module_version.repo.id
    lines["Summary"] = module_version.summary()
    lines["Description"] = module_version.description()
    lines["Artifacts"] = "\n".join(sorted(module_version.artifacts()))
    str_table = str(self.create_simple_table(lines))
    return str_table + "\n\nHint: [d]efault, [e]nabled, [i]nstalled"
def enable(self, module_specs):
    """Mark module streams for enabling; raise when any spec fails."""
    no_match_specs, error_specs, module_dicts = \
        self._resolve_specs_enable_update_sack(module_specs)
    for spec, (nsvcap, module_dict) in module_dicts.items():
        if not nsvcap.profile:
            continue
        logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
            nsvcap.name, nsvcap.profile))
    if no_match_specs or error_specs:
        raise dnf.module.exceptions.ModuleMarkingError(
            no_match_specs=no_match_specs, error_specs=error_specs)
def enable(self, module_specs):
    """Mark module streams for enabling; raise on any resolution/solver error."""
    no_match_specs, error_specs, solver_errors, module_dicts = \
        self._resolve_specs_enable_update_sack(module_specs)
    for spec, (nsvcap, module_dict) in module_dicts.items():
        if not nsvcap.profile:
            continue
        logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
            nsvcap.name, nsvcap.profile))
    if no_match_specs or error_specs or solver_errors:
        raise dnf.exceptions.MarkingErrors(
            no_match_group_specs=no_match_specs,
            error_group_specs=error_specs,
            module_depsolv_errors=solver_errors)
def run_on_module(self):
    """Disable the stream of each module given on the command line."""
    for spec in self.opts.module_nsvp:
        version, form = ModuleSubject(spec).find_module_version(
            self.base.repo_module_dict)
        if form.profile:
            logger.info("Ignoring unnecessary profile: '{}/{}'".format(
                form.name, form.profile))
        self.base.repo_module_dict.disable_by_version(version, True)
        logger.info("Module stream has been disabled: {}:{}".format(
            version.name, version.stream))
def run_on_module(self):
    """Print module info per spec; CLI flags select the level of detail."""
    rmd = self.base.repo_module_dict
    for spec in self.opts.module_nsvp:
        try:
            print()
            if self.opts.verbose:
                print(rmd.get_full_info(spec))
            elif self.opts.profile:
                print(rmd.get_info_profiles(spec))
            else:
                print(rmd.get_info(spec))
        except NoModuleException as e:
            logger.info(e)
def run_on_module(self):
    """Reset every requested module, then commit the transaction."""
    for spec in self.opts.module_spec:
        version, form = ModuleSubject(spec).find_module_version(
            self.base.repo_module_dict)
        if form.profile:
            logger.info("Ignoring unnecessary profile: '{}/{}'".format(
                form.name, form.profile))
        self.base.repo_module_dict.reset_by_version(version)
    self.base.do_transaction()
def find_module_version(self, name, stream=None, version=None, context=None,
                        arch=None):
    """Look up a module version by name and optional stream/version.

    Stream resolution order: explicit argument, the currently-enabled
    stream, then the distribution default. Honors a locked version from the
    module's configuration. Returns None when the module, stream, or
    version cannot be found; raises NoStreamSpecifiedException when no
    stream can be determined at all.
    """
    def use_enabled_stream(repo_module):
        # Prefer whatever stream the user already enabled.
        if repo_module.conf.enabled._get():
            return repo_module.conf.stream._get()
        return None

    def use_default_stream(repo_module):
        return repo_module.defaults.peek_default_stream()

    try:
        repo_module = self[name]
        stream = first_not_none([stream,
                                 use_enabled_stream(repo_module),
                                 use_default_stream(repo_module)])
        if not stream:
            raise NoStreamSpecifiedException(name)
        repo_module_stream = repo_module[stream]
        # BUG FIX: original tested `is not -1` (identity against an int
        # literal) — implementation-dependent and a SyntaxWarning on
        # Python 3.8+; equality is the intended check.
        if repo_module.conf and \
                repo_module.conf.locked._get() and \
                repo_module.conf.version._get() != -1:
            # Version is locked: warn if newer exists, then use the lock.
            if repo_module_stream.latest(
            ).version != repo_module.conf.version._get():
                logger.info(module_messages[VERSION_LOCKED].format(
                    "{}:{}".format(repo_module.name, stream),
                    repo_module.conf.version._get()))
            repo_module_version = repo_module_stream[
                repo_module.conf.version._get()]
        elif version:
            repo_module_version = repo_module_stream[version]
        else:
            # if version is not specified, pick the latest
            repo_module_version = repo_module_stream.latest()
        # TODO: arch
        # TODO: platform module
    except KeyError:
        return None
    return repo_module_version
def _get_info(self, module_specs):
    """Build the "module info" text for the given specs.

    Renders one table per matched module package (deduplicated via the
    set), joins them sorted, and appends the legend hint when non-empty.
    """
    output = set()
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(
                _("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            # A profile part is meaningless for "info"; warn and continue.
            logger.info(
                _("Ignoring unnecessary profile: '{}/{}'").format(
                    nsvcap.name, nsvcap.profile))
        for modulePackage in module_list:
            # Helper produces the " [d]"/"[e]"/"[x]"-style stream markers.
            default_str, enabled_str, disabled_str = self._module_strs_formatter(
                modulePackage, markActive=True)
            default_profiles = self.base._moduleContainer.getDefaultProfiles(
                modulePackage.getName(), modulePackage.getStream())
            profiles_str = self._profile_report_formatter(
                modulePackage, default_profiles, enabled_str)
            lines = OrderedDict()
            lines["Name"] = modulePackage.getName()
            lines["Stream"] = modulePackage.getStream() + default_str + enabled_str + \
                disabled_str
            lines["Version"] = modulePackage.getVersion()
            lines["Context"] = modulePackage.getContext()
            lines["Architecture"] = modulePackage.getArch()
            lines["Profiles"] = profiles_str
            lines["Default profiles"] = " ".join(default_profiles)
            lines["Repo"] = modulePackage.getRepoID()
            lines["Summary"] = modulePackage.getSummary()
            lines["Description"] = modulePackage.getDescription()
            # Flatten module dependencies into "name:[stream,...]" entries.
            req_set = set()
            for req in modulePackage.getModuleDependencies():
                for require_dict in req.getRequires():
                    for mod_require, stream in require_dict.items():
                        req_set.add("{}:[{}]".format(
                            mod_require, ",".join(stream)))
            lines["Requires"] = "\n".join(sorted(req_set))
            demodularized = modulePackage.getDemodularizedRpms()
            if demodularized:
                lines["Demodularized rpms"] = "\n".join(demodularized)
            lines["Artifacts"] = "\n".join(
                sorted(modulePackage.getArtifacts()))
            output.add(self._create_simple_table(lines).toString())
    str_table = "\n\n".join(sorted(output))
    if str_table:
        str_table += MODULE_INFO_TABLE_HINT
    return str_table
def _get_modules_from_name_stream_specs(self):
    """Resolve CLI specs into module packages by name/stream/arch only."""
    matched = set()
    for module_spec in self.opts.module_spec:
        __, nsvcap = self.module_base._get_modules(module_spec)
        name = nsvcap.name or ""
        stream = nsvcap.stream or ""
        # Version/context parts cannot be honored here; tell the user.
        if (nsvcap.version and nsvcap.version != -1) or nsvcap.context:
            logger.info(
                _("Only module name, stream, architecture or profile is used. "
                  "Ignoring unneeded information in argument: '{}'").
                format(module_spec))
        arch = nsvcap.arch or ""
        matched.update(
            self.base._moduleContainer.query(name, stream, "", "", arch))
    return matched
def _get_full_info(self, module_specs):
    """Collect the raw modulemd YAML of every module matching the specs."""
    yaml_docs = set()
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(_("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                nsvcap.name, nsvcap.profile))
        for modulePackage in module_list:
            info = modulePackage.getYaml()
            if info:
                yaml_docs.add(info)
    return "\n\n".join(sorted(yaml_docs))
def get_info_profiles(self, module_spec):
    """Return a table of this module version's profiles and their packages."""
    module_version, module_form = ModuleSubject(
        module_spec).find_module_version(self)
    if module_form.profile:
        logger.info("Ignoring unnecessary profile: '{}/{}'".format(
            module_form.name, module_form.profile))
    lines = OrderedDict()
    lines["Name"] = module_version.full_version
    for profile in module_version.profiles:
        nevras = module_version.profile_nevra_objects(profile)
        lines[profile] = "\n".join(
            "{}-{}".format(nevra.name, nevra.evr()) for nevra in nevras)
    return self.create_simple_table(lines)
def install(self, module_specs, strict=True):
    """Enable the best version of each spec and install requested profiles.

    Returns the (possibly reduced) list of module specs from
    get_best_versions(). When nothing was installed, the enabled streams
    are persisted and reported instead.
    """
    versions, module_specs = self.get_best_versions(module_specs)
    result = False
    for module_version, profiles, default_profiles in versions.values():
        conf = module_version.repo_module.conf
        # A locked module pinned to a different version is skipped entirely.
        if conf.locked._get(
        ) and conf.version._get() != module_version.version:
            logger.warning(module_messages[VERSION_LOCKED].format(
                module_version.name,
                module_version.repo_module.conf.version._get()))
            continue
        self.enable("{}:{}".format(module_version.name,
                                   module_version.stream))
    # Re-apply module excludes/includes now that streams are enabled.
    self.base.sack.reset_module_excludes()
    self.base.use_module_includes()
    for module_version, profiles, default_profiles in versions.values():
        # On a stream upgrade, carry over the already-installed profiles.
        if module_version.version > module_version.repo_module.conf.version._get(
        ):
            profiles.extend(
                list(module_version.repo_module.conf.profiles._get()))
        profiles = list(set(profiles))
        if profiles or default_profiles:
            result |= module_version.install(profiles, default_profiles,
                                             strict)
    # Nothing installed: persist the stream enablement and tell the user.
    if not result and versions and self.base._module_persistor:
        module_versions = [
            "{}:{}".format(module_version.name, module_version.stream)
            for module_version, profiles, default_profiles
            in versions.values()
        ]
        self.base._module_persistor.commit()
        self.base._module_persistor.save()
        logger.info(module_messages[ENABLED_MODULES].format(
            ", ".join(module_versions)))
    return module_specs
def _get_info_profiles(self, module_specs):
    """Render one table per resolved module listing its profiles' packages."""
    tables = set()
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(_("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                nsvcap.name, nsvcap.profile))
        for module in module_list:
            lines = OrderedDict()
            lines["Name"] = module.getFullIdentifier()
            for profile in module.getProfiles():
                lines[profile.getName()] = "\n".join(profile.getContent())
            tables.add(self._create_simple_table(lines).toString())
    return "\n\n".join(sorted(tables))
def _get_info(self, module_specs):
    """Build "module info" tables — one per matched module package."""
    tables = set()
    for module_spec in module_specs:
        module_list, nsvcap = self._get_modules(module_spec)
        if not module_list:
            logger.info(_("Unable to resolve argument {}").format(module_spec))
            continue
        if nsvcap.profile:
            logger.info(_("Ignoring unnecessary profile: '{}/{}'").format(
                nsvcap.name, nsvcap.profile))
        for pkg in module_list:
            # Helper yields the " [d]"/"[e]"/"[x]"-style stream markers.
            default_str, enabled_str, disabled_str = self._module_strs_formater(
                pkg, markActive=True)
            default_profiles = self.base._moduleContainer.getDefaultProfiles(
                pkg.getName(), pkg.getStream())
            profiles_str = self._profile_report_formater(
                pkg, default_profiles, enabled_str)
            lines = OrderedDict((
                ("Name", pkg.getName()),
                ("Stream",
                 pkg.getStream() + default_str + enabled_str + disabled_str),
                ("Version", pkg.getVersion()),
                ("Context", pkg.getContext()),
                ("Architecture", pkg.getArch()),
                ("Profiles", profiles_str),
                ("Default profiles", " ".join(default_profiles)),
                ("Repo", pkg.getRepoID()),
                ("Summary", pkg.getSummary()),
                ("Description", pkg.getDescription()),
                ("Artifacts", "\n".join(sorted(pkg.getArtifacts()))),
            ))
            tables.add(self._create_simple_table(lines).toString())
    str_table = "\n\n".join(sorted(tables))
    if str_table:
        str_table += MODULE_INFO_TABLE_HINT
    return str_table
def install(self, module_specs, strict=True):
    """Enable the best versions for the specs and install their profiles."""
    versions, module_specs = self.get_best_versions(module_specs)
    installed_anything = False
    for module_version, profiles, default_profiles in versions.values():
        self.enable_by_version(module_version)
        self.base._moduleContainer.enable(module_version.name,
                                          module_version.stream)
    # Re-filter the sack now that the chosen streams are enabled.
    hot_fix_repos = [
        repo.id for repo in self.base.repos.iter_enabled()
        if repo.module_hotfixes
    ]
    self.base.sack.filter_modules(self.base._moduleContainer, hot_fix_repos,
                                  self.base.conf.installroot, None)
    for module_version, profiles, default_profiles in versions.values():
        # Carry over profiles already recorded in the module configuration.
        profiles.extend(
            list(module_version.repo_module.conf.profiles._get()))
        profiles = list(set(profiles))
        if profiles or default_profiles:
            installed_anything |= module_version.install(
                profiles, default_profiles, strict)
    if not installed_anything and versions and self.base._module_persistor:
        enabled_names = [
            "{}:{}".format(module_version.name, module_version.stream)
            for module_version, profiles, default_profiles
            in versions.values()
        ]
        self.base._module_persistor.commit()
        self.base._module_persistor.save()
        logger.info(module_messages[ENABLED_MODULES].format(
            ", ".join(enabled_names)))
    return module_specs
def _modules_reset_or_disable(self, module_specs, to_state):
    """Reset or disable modules by name.

    Returns (specs that matched nothing, solver errors from the sack update).
    """
    no_match_specs = []
    for spec in module_specs:
        module_list, nsvcap = self._get_modules(spec)
        if not module_list:
            logger.error(_("Unable to resolve argument {}").format(spec))
            no_match_specs.append(spec)
            continue
        if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch \
                or nsvcap.profile:
            logger.info(
                _("Only module name is required. "
                  "Ignoring unneeded information in argument: '{}'").
                format(spec))
        # One state change per distinct module name.
        for name in {module.getName() for module in module_list}:
            if to_state == STATE_UNKNOWN:
                self.base._moduleContainer.reset(name)
            if to_state == STATE_DISABLED:
                self.base._moduleContainer.disable(name)
    solver_errors = self._update_sack()
    return no_match_specs, solver_errors
def install(self, module_specs, strict=True):
    """Enable the best versions for the specs and install their profiles."""
    versions, module_specs = self.get_best_versions(module_specs)
    installed_anything = False
    for module_version, profiles, default_profiles in versions.values():
        self.enable_by_version(module_version)
        self.base._moduleContainer.enable(module_version.name,
                                          module_version.stream)
    # Re-filter the sack now that the chosen streams are enabled.
    hot_fix_repos = [
        repo.id for repo in self.base.repos.iter_enabled()
        if repo.module_hotfixes
    ]
    self.base.sack.filter_modules(self.base._moduleContainer, hot_fix_repos,
                                  self.base.conf.installroot,
                                  self.base.conf.module_platform_id,
                                  update_only=True)
    for module_version, profiles, default_profiles in versions.values():
        profiles = sorted(set(profiles))
        default_profiles = sorted(set(default_profiles))
        if profiles or default_profiles:
            installed_anything |= module_version.install(
                profiles, default_profiles, strict)
    if not installed_anything and versions:
        enabled_names = [
            "{}:{}".format(module_version.name, module_version.stream)
            for module_version, profiles, default_profiles
            in versions.values()
        ]
        logger.info(module_messages[ENABLED_MODULES].format(
            ", ".join(enabled_names)))
    return module_specs
def run_on_module(self):
    """Enable every module stream given on the command line (assuming yes)."""
    for spec in self.opts.module_nsvp:
        self.base.repo_module_dict.enable(spec, True)
        logger.info("'{}' is enabled".format(spec))
def transformSwdb(input_dir, output_file):
    """Migrate yum history/yumdb/groups data into the new swdb schema.

    Reads the yum history sqlite DB, the yumdb directory and groups.json
    from *input_dir* and fills the pre-created swdb at *output_file*
    (renamed to a '.transform' temp file while being populated).
    Returns True on success, False on any validation/open failure.
    """
    yumdb_path = os.path.join(input_dir, 'yumdb')
    history_path = os.path.join(input_dir, 'history')
    groups_path = os.path.join(input_dir, 'groups.json')
    state_dict = {}
    repo_dict = {}

    # create binding with STATE_TYPE - returns ID (cached in state_dict)
    def bind_state(cursor, desc):
        code = state_dict.get(desc)
        if code:
            return code
        cursor.execute('SELECT state FROM STATE_TYPE WHERE description=?',
                       (desc, ))
        state_id = cursor.fetchone()
        if state_id is None:
            cursor.execute('INSERT INTO STATE_TYPE VALUES(null,?)', (desc, ))
            cursor.execute('SELECT last_insert_rowid()')
            state_id = cursor.fetchone()
        state_dict[desc] = state_id[0]
        return state_id[0]

    # create binding with repo - returns R_ID (cached in repo_dict)
    def bind_repo(cursor, name):
        code = repo_dict.get(name)
        if code:
            return code
        cursor.execute('SELECT R_ID FROM REPO WHERE name=?', (name, ))
        rid = cursor.fetchone()
        if rid is None:
            cursor.execute('INSERT INTO REPO VALUES(null,?)', (name, ))
            cursor.execute('SELECT last_insert_rowid()')
            rid = cursor.fetchone()
        repo_dict[name] = rid[0]
        return rid[0]

    # check path to yumdb dir
    if not os.path.isdir(yumdb_path):
        logger.error(_('Error: yumdb directory not valid'))
        return False

    # check path to history dir
    if not os.path.isdir(history_path):
        logger.error(_('Error: history directory not valid'))
        return False

    # check historyDB file and pick newest one
    historydb_file = glob.glob(os.path.join(history_path, "history*"))
    if len(historydb_file) < 1:
        logger.error(_('Error: history database file not valid'))
        return False
    historydb_file.sort()
    historydb_file = historydb_file[0]
    if not os.path.isfile(historydb_file):
        logger.error(_('Error: history database file not valid'))
        return False

    tmp_output_file = output_file + '.transform'

    try:
        # initialise historyDB
        historyDB = sqlite3.connect(historydb_file)
        h_cursor = historyDB.cursor()
    except Exception:  # narrowed from bare except: still best-effort
        logger.error(
            _("ERROR: unable to open the database '{}'").format(
                historydb_file))
        return False

    try:
        # initialise output DB (work on a temp copy until fully populated)
        os.rename(output_file, tmp_output_file)
        database = sqlite3.connect(tmp_output_file)
        cursor = database.cursor()
    except Exception:  # narrowed from bare except: still best-effort
        logger.error(
            _("ERROR: unable to create the database '{}'").format(
                tmp_output_file))
        return False

    # value distribution in tables
    PACKAGE_DATA = [
        'P_ID', 'R_ID', 'from_repo_revision', 'from_repo_timestamp',
        'installed_by', 'changed_by'
    ]
    TRANS_DATA = [
        'T_ID', 'PD_ID', 'TG_ID', 'done', 'obsoleting', 'reason', 'state'
    ]
    GROUPS = ['name_id', 'name', 'ui_name', 'installed', 'pkg_types']
    ENVIRONMENTS = ['name_id', 'name', 'ui_name', 'pkg_types', 'grp_types']

    logger.info(
        _("Transforming the software database. It may take some time."))

    # construction of PACKAGE from pkgtups
    h_cursor.execute('SELECT * FROM pkgtups')
    for row in h_cursor:
        record_P = [
            row[0],  # P_ID
            row[1],  # name
            row[3],  # epoch
            row[4],  # version
            row[5],  # release
            row[2]   # arch
        ]
        if row[6]:
            # "type:hexdigest" — split on the first colon only (original used
            # maxsplit=2, which would crash on a digest containing ':').
            checksum_type, checksum_data = row[6].split(":", 1)
            record_P.append(checksum_data)
            record_P.append(checksum_type)
        else:
            record_P += ['', '']
        record_P.append(SwdbItem.RPM)  # type
        cursor.execute('INSERT INTO PACKAGE VALUES (?,?,?,?,?,?,?,?,?)',
                       record_P)
    # save changes
    database.commit()

    # construction of PACKAGE_DATA according to pkg_yumdb
    actualPID = 0
    record_PD = [''] * len(PACKAGE_DATA)
    h_cursor.execute('SELECT * FROM pkg_yumdb')
    # pkg_yumdb is a key/value table grouped by package id; flush a record
    # each time the package id changes.
    for row in h_cursor:
        newPID = row[0]
        if actualPID != newPID:
            if actualPID != 0:
                record_PD[0] = actualPID
                # insert new record into PACKAGE_DATA
                PACKAGE_DATA_INSERT(cursor, record_PD)
            actualPID = newPID
            record_PD = [''] * len(PACKAGE_DATA)
        if row[1] in PACKAGE_DATA:
            # collect data for record from pkg_yumdb
            record_PD[PACKAGE_DATA.index(row[1])] = row[2]
        elif row[1] == "from_repo":
            # create binding with REPO table
            record_PD[1] = bind_repo(cursor, row[2])
    record_PD[0] = actualPID
    PACKAGE_DATA_INSERT(cursor, record_PD)  # insert last record
    # save changes
    database.commit()

    # prepare pid to pdid dictionary
    cursor.execute("SELECT PD_ID, P_ID FROM PACKAGE_DATA")
    pid_to_pdid = {}
    for row in cursor:
        pid_to_pdid[row[1]] = row[0]

    obsoleting_pkgs = []

    # trans_data construction
    h_cursor.execute(
        'SELECT tid, pkgtupid, done, state FROM trans_data_pkgs')
    for row in h_cursor:
        state = row[3]
        pid = int(row[1])
        tid = int(row[0])
        # handle Obsoleting packages - save it as separate attribute
        if state == 'Obsoleting':
            obsoleting_pkgs.append((tid, pid))
            continue
        data = [''] * len(TRANS_DATA)
        pdid = pid_to_pdid.get(pid, 0)
        if not pdid:
            # create new entry
            cursor.execute(
                "INSERT INTO PACKAGE_DATA VALUES (null,?,'','','','','')",
                (pid, ))
            cursor.execute('SELECT last_insert_rowid()')
            pdid = cursor.fetchone()[0]
        else:
            # use this entry and delete it from the DB
            del pid_to_pdid[pid]
        # insert trans_data record
        data[TRANS_DATA.index('state')] = bind_state(cursor, state)
        data[TRANS_DATA.index('PD_ID')] = pdid
        data[TRANS_DATA.index('done')] = 1 if row[2] == 'TRUE' else 0
        data[0] = row[0]
        cursor.execute('INSERT INTO TRANS_DATA VALUES (null,?,?,?,?,?,?,?)',
                       data)

    update_cmd = """UPDATE TRANS_DATA SET obsoleting=1 WHERE TD_ID IN (
        SELECT TD_ID FROM PACKAGE_DATA JOIN TRANS_DATA using (PD_ID)
        WHERE T_ID=? and P_ID=?)"""

    # set flag for Obsoleting PD_IDs
    for keys in obsoleting_pkgs:
        cursor.execute(update_cmd, keys)
    # save changes
    database.commit()

    trans_cmd = """SELECT tid, trans_beg.timestamp, trans_end.timestamp,
        trans_beg.rpmdb_version, trans_end.rpmdb_version, cmdline, loginuid,
        null, return_code FROM trans_beg join trans_end using(tid)
        join trans_cmdline using(tid)"""

    # Construction of TRANS
    h_cursor.execute(trans_cmd)
    for row in h_cursor:
        # override empty releasever (filled in below when recoverable)
        r = list(row)
        del r[7]
        cursor.execute("INSERT INTO TRANS VALUES (?,?,?,?,?,?,?,'',?)", r)

    # get releasever for transactions
    cursor.execute('SELECT T_ID FROM TRANS WHERE releasever=?', ('', ))
    missing = cursor.fetchall()
    for row in missing:
        tid = row[0]
        cmd = ("SELECT P_ID FROM TRANS_DATA join PACKAGE_DATA using (PD_ID) "
               "WHERE T_ID=? LIMIT 1")
        cursor.execute(cmd, (tid, ))
        pids = cursor.fetchall()
        for pid in pids:
            h_cursor.execute(
                """SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=?
                AND yumdb_key='releasever' LIMIT 1""", pid)
            rlsver = h_cursor.fetchone()
            if rlsver:
                cursor.execute(
                    "UPDATE TRANS SET releasever=? WHERE T_ID=?",
                    (rlsver[0], tid))
                break

    # collect reasons
    cursor.execute(
        """SELECT TD_ID, P_ID FROM TRANS_DATA
        join PACKAGE_DATA using(PD_ID) join PACKAGE using(P_ID)""")
    missing = cursor.fetchall()
    for row in missing:
        h_cursor.execute(
            """SELECT yumdb_val FROM pkg_yumdb WHERE pkgtupid=?
            AND yumdb_key='reason' LIMIT 1""", (row[1], ))
        reason = h_cursor.fetchone()
        if reason:
            t_reason = convert_reason(reason[0])
            cursor.execute('UPDATE TRANS_DATA SET reason=? WHERE TD_ID=?',
                           (t_reason, row[0]))

    # fetch additional data from yumdb
    get_yumdb_packages(cursor, yumdb_path, bind_repo)

    # construction of OUTPUT
    h_cursor.execute('SELECT * FROM trans_script_stdout')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stdout')))
    h_cursor.execute('SELECT * FROM trans_error')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stderr')))

    # construction of GROUPS
    if os.path.isfile(groups_path):
        with open(groups_path) as groups_file:
            data = json.load(groups_file)
            for key in data:
                if key == 'GROUPS':
                    for value in data[key]:
                        record_G = [''] * len(GROUPS)
                        record_G[GROUPS.index('name_id')] = value
                        if 'name' in data[key][value]:
                            record_G[GROUPS.index(
                                'name')] = data[key][value]['name']
                        record_G[GROUPS.index(
                            'pkg_types')] = data[key][value]['pkg_types']
                        record_G[GROUPS.index('installed')] = True
                        if 'ui_name' in data[key][value]:
                            record_G[GROUPS.index(
                                'ui_name')] = data[key][value]['ui_name']
                        cursor.execute(
                            '''INSERT INTO GROUPS VALUES (null,?,?,?,?,?)''',
                            (record_G))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_gid = cursor.fetchone()[0]
                        for package in data[key][value]['full_list']:
                            ADD_GROUPS_PACKAGE(cursor, tmp_gid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_GROUPS_EXCLUDE(cursor, tmp_gid, package)
            for key in data:
                if key == 'ENVIRONMENTS':
                    for value in data[key]:
                        record_E = [''] * len(ENVIRONMENTS)
                        record_E[ENVIRONMENTS.index('name_id')] = value
                        if 'name' in data[key][value]:
                            # BUG FIX: original wrote the name into record_G
                            # (the stale group record from the loop above)
                            # via GROUPS.index, so the environment's name was
                            # dropped; store it in the environment record.
                            record_E[ENVIRONMENTS.index(
                                'name')] = data[key][value]['name']
                        record_E[ENVIRONMENTS.index(
                            'grp_types')] = data[key][value]['grp_types']
                        record_E[ENVIRONMENTS.index(
                            'pkg_types')] = data[key][value]['pkg_types']
                        if 'ui_name' in data[key][value]:
                            record_E[ENVIRONMENTS.index(
                                'ui_name')] = data[key][value]['ui_name']
                        cursor.execute(
                            '''INSERT INTO ENVIRONMENTS
                            VALUES (null,?,?,?,?,?)''', (record_E))
                        cursor.execute('SELECT last_insert_rowid()')
                        tmp_eid = cursor.fetchone()[0]
                        for package in data[key][value]['full_list']:
                            BIND_ENV_GROUP(cursor, tmp_eid, package)
                        for package in data[key][value]['pkg_exclude']:
                            ADD_ENV_EXCLUDE(cursor, tmp_eid, package)

    # construction of TRANS_GROUP_DATA from GROUPS
    cursor.execute('SELECT * FROM GROUPS')
    tmp_groups = cursor.fetchall()
    for row in tmp_groups:
        command = []
        # match transactions whose cmdline mentions the group id/name/ui_name
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                           " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for single_trans in tmp_trans:
                    data = (single_trans[0], row[0], row[1], row[2], row[3],
                            row[4], row[5])
                    cursor.execute(
                        "INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)",
                        data)

    # construction of TRANS_GROUP_DATA from ENVIRONMENTS
    cursor.execute('SELECT * FROM ENVIRONMENTS WHERE ui_name!=?', ('', ))
    tmp_env = cursor.fetchall()
    for row in tmp_env:
        command = []
        for pattern in row[1:4]:
            if pattern:
                command.append("cmdline LIKE '%{}%'".format(pattern))
        if command:
            cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                           " or ".join(command))
            tmp_trans = cursor.fetchall()
            if tmp_trans:
                for trans in tmp_trans:
                    cursor.execute(
                        "SELECT G_ID FROM ENVIRONMENTS_GROUPS WHERE E_ID=?",
                        (row[0], ))
                    tmp_groups = cursor.fetchall()
                    for gid in tmp_groups:
                        cursor.execute("SELECT * FROM GROUPS WHERE G_ID=?",
                                       (gid[0], ))
                        data = cursor.fetchone()
                        tgdata = (trans[0], data[0], data[1], data[2],
                                  data[3], data[4], data[5])
                        cursor.execute(
                            "INSERT INTO TRANS_GROUP_DATA VALUES(null,?,?,?,?,?,?,?)",
                            tgdata)

    # create Transaction performed with package
    h_cursor.execute('SELECT tid, pkgtupid FROM trans_with_pkgs')
    for row in h_cursor:
        cursor.execute('INSERT INTO TRANS_WITH VALUES (null,?,?)', row)

    # save changes
    database.commit()
    # close connection
    database.close()
    historyDB.close()
    # successful — move the populated temp DB back into place
    os.rename(tmp_output_file, output_file)
    return True
def transformSwdb(input_dir, output_file):
    """Convert yum's history/yumdb data into the new swdb sqlite format.

    :param input_dir: directory expected to contain ``yumdb/``, ``history/``
        and optionally ``groups.json``
    :param output_file: path to the pre-created target swdb database; it is
        renamed to ``<output_file>.transform`` while being filled and renamed
        back only on success
    :return: True on success, False on any detected error
    """
    yumdb_path = os.path.join(input_dir, 'yumdb')
    history_path = os.path.join(input_dir, 'history')
    groups_path = os.path.join(input_dir, 'groups.json')

    # memoization caches so bind_state()/bind_repo() hit SQL at most once
    # per distinct value
    state_dict = {}
    repo_dict = {}

    # create binding with STATE_TYPE - returns ID (inserting on demand)
    def bind_state(cursor, desc):
        code = state_dict.get(desc)
        if code:
            return code
        cursor.execute('SELECT state FROM STATE_TYPE WHERE description=?',
                       (desc, ))
        state_id = cursor.fetchone()
        if state_id is None:
            cursor.execute('INSERT INTO STATE_TYPE VALUES(null,?)', (desc, ))
            cursor.execute('SELECT last_insert_rowid()')
            state_id = cursor.fetchone()
        state_dict[desc] = state_id[0]
        return state_id[0]

    # create binding with repo - returns R_ID (inserting on demand)
    def bind_repo(cursor, name):
        code = repo_dict.get(name)
        if code:
            return code
        cursor.execute('SELECT R_ID FROM REPO WHERE name=?', (name, ))
        rid = cursor.fetchone()
        if rid is None:
            cursor.execute('INSERT INTO REPO VALUES(null,?)', (name, ))
            cursor.execute('SELECT last_insert_rowid()')
            rid = cursor.fetchone()
        repo_dict[name] = rid[0]
        return rid[0]

    # check path to yumdb dir
    if not os.path.isdir(yumdb_path):
        logger.error(_('Error: yumdb directory not valid'))
        return False

    # check path to history dir
    if not os.path.isdir(history_path):
        logger.error(_('Error: history directory not valid'))
        return False

    # check historyDB file and pick newest one
    historydb_file = glob.glob(os.path.join(history_path, "history*"))
    if len(historydb_file) < 1:
        logger.error(_('Error: history database file not valid'))
        return False
    historydb_file.sort()
    # NOTE(review): ascending sort + [0] selects the lexicographically FIRST
    # history file, although the comment above says "newest" -- confirm which
    # file is actually intended before changing this.
    historydb_file = historydb_file[0]
    if not os.path.isfile(historydb_file):
        logger.error(_('Error: history database file not valid'))
        return False

    tmp_output_file = output_file + '.transform'
    try:
        # initialise historyDB
        historyDB = sqlite3.connect(historydb_file)
        h_cursor = historyDB.cursor()
    except Exception:
        # was a bare except; narrowed so SystemExit/KeyboardInterrupt escape
        logger.error(_("ERROR: unable to open the database '{}'").format(
            historydb_file))
        return False

    try:
        # initialise output DB (work on a temporary copy of the target)
        os.rename(output_file, tmp_output_file)
        database = sqlite3.connect(tmp_output_file)
        cursor = database.cursor()
    except Exception:
        logger.error(_("ERROR: unable to create the database '{}'").format(
            tmp_output_file))
        return False

    # value distribution in tables (column order for the record lists below)
    PACKAGE_DATA = ['P_ID', 'R_ID', 'from_repo_revision',
                    'from_repo_timestamp', 'installed_by', 'changed_by']
    TRANS_DATA = ['T_ID', 'PD_ID', 'TG_ID', 'done', 'obsoleting', 'reason',
                  'state']
    GROUPS = ['name_id', 'name', 'ui_name', 'installed', 'pkg_types']
    ENVIRONMENTS = ['name_id', 'name', 'ui_name', 'pkg_types', 'grp_types']

    logger.info(_("Transforming the software database. It may take some time."))

    # construction of PACKAGE from pkgtups
    h_cursor.execute('SELECT * FROM pkgtups')
    for row in h_cursor:
        record_P = [
            row[0],  # P_ID
            row[1],  # name
            row[3],  # epoch
            row[4],  # version
            row[5],  # release
            row[2]   # arch
        ]
        if row[6]:
            # checksum is stored as "<type>:<data>"; split on the first colon
            # only (was maxsplit=2) so extra colons in the data cannot raise
            checksum_type, checksum_data = row[6].split(":", 1)
            record_P.append(checksum_data)
            record_P.append(checksum_type)
        else:
            record_P += ['', '']
        record_P.append(SwdbItem.RPM)  # type
        cursor.execute('INSERT INTO PACKAGE VALUES (?,?,?,?,?,?,?,?,?)',
                       record_P)
    # save changes
    database.commit()

    # construction of PACKAGE_DATA according to pkg_yumdb; rows are grouped
    # by package id, so a record is flushed whenever the id changes
    actualPID = 0
    record_PD = [''] * len(PACKAGE_DATA)
    h_cursor.execute('SELECT * FROM pkg_yumdb')
    # for each row in pkg_yumdb
    for row in h_cursor:
        newPID = row[0]
        if actualPID != newPID:
            if actualPID != 0:
                record_PD[0] = actualPID
                # insert new record into PACKAGE_DATA
                PACKAGE_DATA_INSERT(cursor, record_PD)
            actualPID = newPID
            record_PD = [''] * len(PACKAGE_DATA)
        if row[1] in PACKAGE_DATA:
            # collect data for record from pkg_yumdb
            record_PD[PACKAGE_DATA.index(row[1])] = row[2]
        elif row[1] == "from_repo":
            # create binding with REPO table
            record_PD[1] = bind_repo(cursor, row[2])
    if actualPID != 0:
        # insert last record; guarded so an empty pkg_yumdb does not
        # produce a bogus row with P_ID 0
        record_PD[0] = actualPID
        PACKAGE_DATA_INSERT(cursor, record_PD)
    # save changes
    database.commit()

    # prepare pid to pdid dictionary
    cursor.execute("SELECT PD_ID, P_ID FROM PACKAGE_DATA")
    pid_to_pdid = {}
    for row in cursor:
        pid_to_pdid[row[1]] = row[0]

    obsoleting_pkgs = []

    # trans_data construction
    h_cursor.execute('SELECT tid, pkgtupid, done, state FROM trans_data_pkgs')
    for row in h_cursor:
        state = row[3]
        pid = int(row[1])
        tid = int(row[0])
        # handle Obsoleting packages - save it as separate attribute
        if state == 'Obsoleting':
            obsoleting_pkgs.append((tid, pid))
            continue
        data = [''] * len(TRANS_DATA)
        pdid = pid_to_pdid.get(pid, 0)
        if not pdid:
            # create new entry
            cursor.execute(
                "INSERT INTO PACKAGE_DATA VALUES (null,?,'','','','','')",
                (pid,))
            cursor.execute('SELECT last_insert_rowid()')
            pdid = cursor.fetchone()[0]
        else:
            # use this entry and delete it from the DB
            del pid_to_pdid[pid]
        # insert trans_data record
        data[TRANS_DATA.index('state')] = bind_state(cursor, state)
        data[TRANS_DATA.index('PD_ID')] = pdid
        data[TRANS_DATA.index('done')] = 1 if row[2] == 'TRUE' else 0
        data[0] = row[0]
        cursor.execute('INSERT INTO TRANS_DATA VALUES (null,?,?,?,?,?,?,?)',
                       data)

    update_cmd = """UPDATE TRANS_DATA SET obsoleting=1 WHERE TD_ID IN (
        SELECT TD_ID FROM PACKAGE_DATA JOIN TRANS_DATA using (PD_ID)
        WHERE T_ID=? and P_ID=?)"""

    # set flag for Obsoleting PD_IDs
    for keys in obsoleting_pkgs:
        cursor.execute(update_cmd, keys)
    # save changes
    database.commit()

    trans_cmd = """SELECT tid, trans_beg.timestamp, trans_end.timestamp,
        trans_beg.rpmdb_version, trans_end.rpmdb_version, cmdline, loginuid,
        null, return_code FROM trans_beg join trans_end using(tid)
        join trans_cmdline using(tid)"""

    # Construction of TRANS
    h_cursor.execute(trans_cmd)
    for row in h_cursor:
        # drop the placeholder 'null' column (index 7) and store an empty
        # releasever literal instead; it is filled in below where possible
        r = list(row)
        del r[7]
        cursor.execute("INSERT INTO TRANS VALUES (?,?,?,?,?,?,?,'',?)", r)

    # get releasever for transactions
    cursor.execute('SELECT T_ID FROM TRANS WHERE releasever=?', ('', ))
    missing = cursor.fetchall()
    for row in missing:
        tid = row[0]
        cmd = ("SELECT P_ID FROM TRANS_DATA join PACKAGE_DATA using (PD_ID) "
               "WHERE T_ID=? LIMIT 1")
        cursor.execute(cmd, (tid,))
        pids = cursor.fetchall()
        for pid in pids:
            # pid is a 1-tuple, usable directly as the parameter sequence
            h_cursor.execute("""SELECT yumdb_val FROM pkg_yumdb
                WHERE pkgtupid=? AND yumdb_key='releasever' LIMIT 1""", pid)
            rlsver = h_cursor.fetchone()
            if rlsver:
                cursor.execute("UPDATE TRANS SET releasever=? WHERE T_ID=?",
                               (rlsver[0], tid))
                break

    # collect reasons
    cursor.execute("""SELECT TD_ID, P_ID FROM TRANS_DATA
        join PACKAGE_DATA using(PD_ID) join PACKAGE using(P_ID)""")
    missing = cursor.fetchall()
    for row in missing:
        h_cursor.execute("""SELECT yumdb_val FROM pkg_yumdb
            WHERE pkgtupid=? AND yumdb_key='reason' LIMIT 1""", (row[1],))
        reason = h_cursor.fetchone()
        if reason:
            t_reason = convert_reason(reason[0])
            cursor.execute('UPDATE TRANS_DATA SET reason=? WHERE TD_ID=?',
                           (t_reason, row[0]))

    # fetch additional data from yumdb
    get_yumdb_packages(cursor, yumdb_path, bind_repo)

    # construction of OUTPUT
    h_cursor.execute('SELECT * FROM trans_script_stdout')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stdout')))
    h_cursor.execute('SELECT * FROM trans_error')
    for row in h_cursor:
        cursor.execute('INSERT INTO OUTPUT VALUES (null,?,?,?)',
                       (row[1], row[2], BIND_OUTPUT(cursor, 'stderr')))

    # construction of GROUPS
    if os.path.isfile(groups_path):
        with open(groups_path) as groups_file:
            data = json.load(groups_file)
        for key in data:
            if key == 'GROUPS':
                for value in data[key]:
                    record_G = [''] * len(GROUPS)
                    record_G[GROUPS.index('name_id')] = value
                    if 'name' in data[key][value]:
                        record_G[GROUPS.index('name')] = \
                            data[key][value]['name']
                    record_G[GROUPS.index('pkg_types')] = \
                        data[key][value]['pkg_types']
                    record_G[GROUPS.index('installed')] = True
                    if 'ui_name' in data[key][value]:
                        record_G[GROUPS.index('ui_name')] = \
                            data[key][value]['ui_name']
                    cursor.execute(
                        '''INSERT INTO GROUPS VALUES (null,?,?,?,?,?)''',
                        (record_G))
                    cursor.execute('SELECT last_insert_rowid()')
                    tmp_gid = cursor.fetchone()[0]
                    for package in data[key][value]['full_list']:
                        ADD_GROUPS_PACKAGE(cursor, tmp_gid, package)
                    for package in data[key][value]['pkg_exclude']:
                        ADD_GROUPS_EXCLUDE(cursor, tmp_gid, package)
        for key in data:
            if key == 'ENVIRONMENTS':
                for value in data[key]:
                    record_E = [''] * len(ENVIRONMENTS)
                    record_E[ENVIRONMENTS.index('name_id')] = value
                    if 'name' in data[key][value]:
                        # fixed: the name used to be written into record_G (a
                        # stale GROUPS record), losing environment names and
                        # raising NameError when no groups had been processed
                        record_E[ENVIRONMENTS.index('name')] = \
                            data[key][value]['name']
                    record_E[ENVIRONMENTS.index('grp_types')] = \
                        data[key][value]['grp_types']
                    record_E[ENVIRONMENTS.index('pkg_types')] = \
                        data[key][value]['pkg_types']
                    if 'ui_name' in data[key][value]:
                        record_E[ENVIRONMENTS.index('ui_name')] = \
                            data[key][value]['ui_name']
                    cursor.execute(
                        '''INSERT INTO ENVIRONMENTS VALUES (null,?,?,?,?,?)''',
                        (record_E))
                    cursor.execute('SELECT last_insert_rowid()')
                    tmp_eid = cursor.fetchone()[0]
                    for package in data[key][value]['full_list']:
                        BIND_ENV_GROUP(cursor, tmp_eid, package)
                    for package in data[key][value]['pkg_exclude']:
                        ADD_ENV_EXCLUDE(cursor, tmp_eid, package)

        # construction of TRANS_GROUP_DATA from GROUPS
        # NOTE(review): group name patterns are interpolated directly into the
        # LIKE clause; the data comes from the local groups.json, but
        # parameterizing would still be safer
        cursor.execute('SELECT * FROM GROUPS')
        tmp_groups = cursor.fetchall()
        for row in tmp_groups:
            command = []
            for pattern in row[1:4]:
                if pattern:
                    command.append("cmdline LIKE '%{}%'".format(pattern))
            if command:
                cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                               " or ".join(command))
                tmp_trans = cursor.fetchall()
                if tmp_trans:
                    for single_trans in tmp_trans:
                        data = (single_trans[0], row[0], row[1], row[2],
                                row[3], row[4], row[5])
                        cursor.execute(
                            "INSERT INTO TRANS_GROUP_DATA "
                            "VALUES(null,?,?,?,?,?,?,?)",
                            data)

        # construction of TRANS_GROUP_DATA from ENVIRONMENTS
        cursor.execute('SELECT * FROM ENVIRONMENTS WHERE ui_name!=?', ('', ))
        tmp_env = cursor.fetchall()
        for row in tmp_env:
            command = []
            for pattern in row[1:4]:
                if pattern:
                    command.append("cmdline LIKE '%{}%'".format(pattern))
            if command:
                cursor.execute("SELECT T_ID FROM TRANS WHERE " +
                               " or ".join(command))
                tmp_trans = cursor.fetchall()
                if tmp_trans:
                    for trans in tmp_trans:
                        cursor.execute(
                            "SELECT G_ID FROM ENVIRONMENTS_GROUPS "
                            "WHERE E_ID=?", (row[0], ))
                        tmp_groups = cursor.fetchall()
                        for gid in tmp_groups:
                            cursor.execute(
                                "SELECT * FROM GROUPS WHERE G_ID=?",
                                (gid[0], ))
                            data = cursor.fetchone()
                            tgdata = (trans[0], data[0], data[1], data[2],
                                      data[3], data[4], data[5])
                            cursor.execute(
                                "INSERT INTO TRANS_GROUP_DATA "
                                "VALUES(null,?,?,?,?,?,?,?)",
                                tgdata)

    # create Transaction performed with package
    h_cursor.execute('SELECT tid, pkgtupid FROM trans_with_pkgs')
    for row in h_cursor:
        cursor.execute('INSERT INTO TRANS_WITH VALUES (null,?,?)', row)

    # save changes
    database.commit()

    # close connection
    database.close()
    historyDB.close()

    # successful - move the filled database back into place
    os.rename(tmp_output_file, output_file)
    return True
def run_on_module(self):
    """Log the brief description that repo_module_dict produces for the
    enabled modules matching the given module specs."""
    brief = self.base.repo_module_dict.get_brief_description_enabled(
        self.opts.module_spec)
    logger.info(brief)
def run_on_module(self):
    """Log the profile information for every module spec the user supplied."""
    for module_spec in self.opts.module_spec:
        # blank line separates consecutive entries in the output
        print()
        profile_info = self.base.repo_module_dict.get_info_profiles(module_spec)
        logger.info(profile_info)
def exit_dnf(message):
    """Log *message* at INFO level, then terminate with exit status 0.

    Raising SystemExit(0) is exactly what sys.exit(0) does.
    """
    logger.info(message)
    raise SystemExit(0)
def get_best_versions(self, module_specs):
    """Resolve *module_specs* to the best matching module versions.

    Returns a pair ``(best_versions, skipped)``: ``best_versions`` maps each
    module name to ``[module_version, profiles, default_profiles]`` and
    ``skipped`` lists the specs for which no module was found.

    Raises EnableMultipleStreamsException when two specs request different
    streams of the same module.
    """
    best_versions = {}
    skipped = []
    for module_spec in module_specs:
        subj = ModuleSubject(module_spec)
        try:
            module_version, module_form = subj.find_module_version(self)
        except NoModuleException:
            # unknown spec: collect it for the caller instead of failing
            skipped.append(module_spec)
            continue
        key = module_version.name
        if key in best_versions:
            # this module name was already requested by an earlier spec:
            # merge the profiles and keep the higher of the two versions
            best_version, profiles, default_profiles = best_versions[key]
            if best_version.stream != module_version.stream:
                raise EnableMultipleStreamsException(module_version.name)
            if module_form.profile:
                profiles.append(module_form.profile)
            else:
                # no explicit profile: add the default profiles of the
                # requested (or default) stream
                stream = module_form.stream or module_version.repo_module.defaults \
                    .peek_default_stream()
                profile_defaults = module_version.repo_module.defaults.peek_profile_defaults(
                )
                if stream in profile_defaults:
                    default_profiles.extend(profile_defaults[stream].dup())
            if best_version < module_version:
                # the newer version wins; tell the user about the upgrade
                logger.info(
                    module_messages[INSTALLING_NEWER_VERSION].format(
                        best_version, module_version))
                best_versions[key] = [
                    module_version, profiles, default_profiles
                ]
            else:
                best_versions[key] = [
                    best_version, profiles, default_profiles
                ]
        else:
            # first occurrence of this module name
            default_profiles = []
            profiles = []
            stream = module_form.stream or module_version.repo_module.defaults \
                .peek_default_stream()
            profile_defaults = module_version.repo_module.defaults.peek_profile_defaults(
            )
            if stream in profile_defaults:
                default_profiles.extend(profile_defaults[stream].dup())
            if module_form.profile:
                profiles = [module_form.profile]
            elif default_profiles:
                # defaults found: leave the explicit profile list empty
                profiles = []
            else:
                # nothing requested and no defaults known: fall back to
                # the literal 'default' profile
                default_profiles = ['default']
            best_versions[key] = [
                module_version, profiles, default_profiles
            ]
    return best_versions, skipped
def run_transaction(self):
    """After the transaction, remind the user that enabling a module stream
    leaves the installed RPMs untouched."""
    subcommand = self.opts.subcmd[0]
    if subcommand == 'enable':
        logger.info(_("\nSwitching module streams does not alter installed packages "
                      "(see 'module enable' in dnf(8) for details)"))