def prepare_pool(self, arch, ignore_conflicts):
    pool = solv.Pool()
    pool.setarch(arch)

    self.lockjobs[arch] = []
    solvables = set()

    for project, reponame in self.repos:
        repo = pool.add_repo(project)
        s = os.path.join(CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, reponame, arch))
        r = repo.add_solv(s)
        if not r:
            if not self.did_update:
                raise Exception(
                    'failed to add repo {}/{}/{}. Need to run update first?'.format(project, reponame, arch))
            continue
        for solvable in repo.solvables_iter():
            if ignore_conflicts:
                solvable.unset(solv.SOLVABLE_CONFLICTS)
                solvable.unset(solv.SOLVABLE_OBSOLETES)
            # only take the first solvable in the repo chain
            if solvable.name in solvables:
                self.lockjobs[arch].append(pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id))
            solvables.add(solvable.name)

    pool.addfileprovides()
    pool.createwhatprovides()

    for l in self.locales:
        pool.set_namespaceproviders(solv.NAMESPACE_LANGUAGE, pool.Dep(l), True)

    return pool

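# A minimal usage sketch for prepare_pool() above. `tool` stands in for an
# instance of the surrounding class, and 'vim'/x86_64 are illustrative:
# solve an install job against the prepared pool plus the recorded lock
# jobs, then list the solvables the solver picked.
arch = 'x86_64'
pool = tool.prepare_pool(arch, ignore_conflicts=False)
jobs = pool.select('vim', solv.Selection.SELECTION_NAME).jobs(solv.Job.SOLVER_INSTALL)
jobs += tool.lockjobs[arch]
solver = pool.Solver()
problems = solver.solve(jobs)
if problems:
    for problem in problems:
        print(problem)
else:
    for s in solver.transaction().newsolvables():
        print(s.name, s.evr, s.arch)
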
def __init__(self):
    """Solver Init."""
    self._finalized = False
    self._pool = solv.Pool()
    self._pool.setarch()  # prevent https://github.com/openSUSE/libsolv/issues/267
    self.mapping = UnitSolvableMapping()

def __init__(self):
    """Solver Init."""
    self._finalized = False
    self._pool = solv.Pool()
    self._pool.setarch()  # prevent https://github.com/openSUSE/libsolv/issues/267
    self._pool.set_flag(solv.Pool.POOL_FLAG_IMPLICITOBSOLETEUSESCOLORS, 1)
    self.mapping = UnitSolvableMapping()

def __init__(self, repos, pkgs_in=None, quiet=True):
    self._override_sigchecks = False
    self.quiet = quiet
    self.pkgs = pkgs_in or []
    self.repos = repos
    self.pool = solv.Pool()
    self.setup()

def __init__(self):
    self.pool = solv.Pool()
    self.pool.setarch("x86_64")
    self.build_repo = self.pool.add_repo("build")
    self.available_repo = self.pool.add_repo("available")
    # Solvable objects representing modules stored in a list grouped by
    # the name:stream.
    self.solvables = {}

def __init__(self, repos, packages, arch=None):
    """
    :param repos: An iterable of :py:class:`rpmdeplint.repodata.Repo` instances
    :param packages: An iterable of RPM package paths to be tested
    """
    # delayed import to avoid circular dependency
    from rpmdeplint.repodata import RepoDownloadError

    self.pool = solv.Pool()
    self.pool.setarch(arch)

    #: List of :py:class:`solv.Solvable` to be tested (corresponding to *packages* parameter)
    self.solvables = []
    self.commandline_repo = self.pool.add_repo('@commandline')
    for rpmpath in packages:
        solvable = self.commandline_repo.add_rpm(rpmpath)
        if solvable is None:
            # pool.errstr is already prefixed with the filename
            raise UnreadablePackageError('Failed to read package: %s' % self.pool.errstr)
        self.solvables.append(solvable)

    self.repos_by_name = {}  #: Mapping of {repo name: :py:class:`rpmdeplint.repodata.Repo`}
    for repo in repos:
        try:
            repo.download_repodata()
        except RepoDownloadError as e:
            if repo.skip_if_unavailable:
                logger.warn('Skipping repo %s: %s', repo.name, e)
                continue
            else:
                raise
        solv_repo = self.pool.add_repo(repo.name)
        # solv.xfopen does not accept unicode filenames on Python 2
        solv_repo.add_rpmmd(
            solv.xfopen_fd(str(repo.primary_url), repo.primary.fileno()), None)
        solv_repo.add_rpmmd(
            solv.xfopen_fd(str(repo.filelists_url), repo.filelists.fileno()),
            None, solv.Repo.REPO_EXTEND_SOLVABLES)
        self.repos_by_name[repo.name] = repo

    self.pool.addfileprovides()
    self.pool.createwhatprovides()

    # Special handling for "installonly" packages: we create jobs to mark
    # installonly package names as "multiversion" and then set those as
    # pool jobs, which means the jobs are automatically applied whenever we
    # run the solver on this pool.
    multiversion_jobs = []
    for name in installonlypkgs:
        selection = self.pool.select(name, solv.Selection.SELECTION_PROVIDES)
        multiversion_jobs.extend(selection.jobs(solv.Job.SOLVER_MULTIVERSION))
    self.pool.setpooljobs(multiversion_jobs)

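# Hedged follow-up sketch: a hypothetical helper (not part of the original
# class) showing how the pool built in __init__ can be queried. It tries
# to install each command-line package and collects the solver's problem
# strings.
def find_install_problems(self):
    solver = self.pool.Solver()
    problems = []
    for solvable in self.solvables:
        job = self.pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_INSTALL, solvable.id)
        problems.extend(str(p) for p in solver.solve([job]))
    return problems
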
def do_dump_solv(self, subcmd, opts, baseurl):
    """${cmd_name}: fetch repomd and dump solv

    If an output directory is specified, a file named according to
    the build is created there. Otherwise the solv file is dumped
    to stdout.

    ${cmd_usage}
    ${cmd_option_list}
    """
    name = None
    ofh = sys.stdout
    if self.options.output_dir:
        url = urlparse.urljoin(baseurl, 'media.1/media')
        with requests.get(url) as media:
            for i, line in enumerate(media.iter_lines()):
                if i != 1:
                    continue
                name = line
        if name is None or '-Build' not in name:
            raise Exception('media.1/media includes no build number')
        name = '{}/{}.solv'.format(self.options.output_dir, name)
        if not opts.overwrite and os.path.exists(name):
            logger.info("%s exists", name)
            return
        ofh = open(name + '.new', 'w')

    pool = solv.Pool()
    pool.setarch()

    repo = pool.add_repo(''.join(random.choice(string.letters) for _ in range(5)))
    f = tempfile.TemporaryFile()
    url = urlparse.urljoin(baseurl, 'repodata/repomd.xml')
    repomd = requests.get(url)
    ns = {'r': 'http://linux.duke.edu/metadata/repo'}
    root = ET.fromstring(repomd.content)
    location = root.find('.//r:data[@type="primary"]/r:location', ns).get('href')
    f.write(repomd.content)
    os.lseek(f.fileno(), 0, os.SEEK_SET)
    repo.add_repomdxml(f, 0)
    url = urlparse.urljoin(baseurl, location)
    with requests.get(url, stream=True) as primary:
        content = gzip.GzipFile(fileobj=StringIO(primary.content))
        os.lseek(f.fileno(), 0, os.SEEK_SET)
        f.write(content.read())
        os.lseek(f.fileno(), 0, os.SEEK_SET)
        # TODO: verify checksum
        repo.add_rpmmd(f, None, 0)
    repo.create_stubs()
    repo.write(ofh)

    if name is not None:
        os.rename(name + '.new', name)

def prepare_pool(self, repos):
    self.pool = solv.Pool()
    self.pool.setarch(self.options.arch)

    self._read_repos(repos)

    if self.options.system:
        self._add_system_repo()

    self.pool.addfileprovides()
    self.pool.createwhatprovides()

def _prepare_pool(self, arch):
    pool = solv.Pool()
    pool.setarch(arch)

    self.lockjobs[arch] = []
    solvables = set()

    def cb(name, evr):
        ret = 0
        if name == solv.NAMESPACE_MODALIAS:
            ret = 1
        elif name == solv.NAMESPACE_FILESYSTEM:
            ret = 1
        elif name == solv.NAMESPACE_LANGUAGE:
            if pool.id2str(evr) in self.locales:
                ret = 1
        else:
            logger.warning('unhandled "{} {}"'.format(pool.id2str(name), pool.id2str(evr)))
        return ret

    if hasattr(pool, 'set_namespacecallback'):
        pool.set_namespacecallback(cb)
    else:
        logger.warn('libsolv missing namespace callback')

    for prp in self.repos:
        project, reponame = prp.split('/')
        repo = pool.add_repo(project)
        s = os.path.join(CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, reponame, arch))
        r = repo.add_solv(s)
        if not r:
            raise Exception(
                "failed to add repo {}/{}/{}. Need to run update first?".format(project, reponame, arch))
        for solvable in repo.solvables_iter():
            if solvable.name in solvables:
                self.lockjobs[arch].append(
                    pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id))
            solvables.add(solvable.name)

    pool.addfileprovides()
    pool.createwhatprovides()

    return pool

def dump_solv(name, baseurl):
    pool = solv.Pool()
    pool.setarch()

    repo = pool.add_repo(''.join(random.choice(string.ascii_letters) for _ in range(5)))
    if not parse_repomd(repo, baseurl) and not parse_susetags(repo, baseurl):
        raise Exception('neither repomd nor susetags exists in ' + baseurl)

    repo.create_stubs()

    ofh = solv.xfopen(name, 'w')
    repo.write(ofh)
    ofh.flush()

    return name

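# Example invocation of dump_solv() above; the URL is illustrative, and
# parse_repomd()/parse_susetags() are assumed to be defined elsewhere in
# this module.
dump_solv('/tmp/oss.solv', 'https://download.opensuse.org/tumbleweed/repo/oss/')
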
def raw_list_packages(self, filters=None):
    if not self.repo.is_configured:
        self.setup_repo(self.repo)

    pool = solv.Pool()
    repo = pool.add_repo(str(self.channel_label or self.reponame))

    solv_path = os.path.join(self.repo.root, ZYPP_SOLV_CACHE_PATH,
                             self.channel_label or self.reponame, 'solv')
    if not os.path.isfile(solv_path) or not repo.add_solv(solv.xfopen(str(solv_path)), 0):
        raise SolvFileNotFound(solv_path)

    rawpkglist = []
    for solvable in repo.solvables_iter():
        # Solvables with ":" in name are not packages
        if ':' in solvable.name:
            continue
        rawpkglist.append(RawSolvablePackage(solvable))

    self.num_packages = len(rawpkglist)
    return rawpkglist

def merge_susetags(output, files):
    pool = solv.Pool()
    pool.setarch()

    for file in files:
        oldsysrepo = pool.add_repo(file)
        defvendorid = oldsysrepo.meta.lookup_id(solv.SUSETAGS_DEFAULTVENDOR)
        f = tempfile.TemporaryFile()
        st = subprocess.call(['xz', '-cd', file], stdout=f.fileno())
        os.lseek(f.fileno(), 0, os.SEEK_SET)
        oldsysrepo.add_susetags(
            solv.xfopen_fd(None, f.fileno()), defvendorid, None,
            solv.Repo.REPO_NO_INTERNALIZE | solv.Repo.SUSETAGS_RECORD_SHARES)

    packages = dict()
    for s in pool.solvables_iter():
        evr = s.evr.split('-')
        release = evr.pop()
        version = '-'.join(evr)
        key = s.name + "-" + version + "." + s.arch
        if re.search('-release', s.name):
            # just take one version of it
            key = s.name + "." + s.arch
        packages[key] = {
            'name': s.name,
            'version': version,
            'arch': s.arch,
            'release': release,
            'provides': set()
        }
        for dep in s.lookup_deparray(solv.SOLVABLE_PROVIDES):
            packages[key]['provides'].add(str(dep))

    output_file = open(output, 'w')
    print("=Ver: 2.0", file=output_file)
    for package in sorted(packages):
        infos = packages[package]
        print('=Pkg:', infos['name'], infos['version'], infos['release'], infos['arch'], file=output_file)
        print('+Prv:', file=output_file)
        for dep in sorted(infos['provides']):
            print(dep, file=output_file)
        print('-Prv:', file=output_file)

def _get_solvable_packages(self):
    """
    Return the full list of solvable packages available at the configured repo.
    This information is read from the solv file created by Zypper.

    :returns: list
    """
    if not self.repo.is_configured:
        self.setup_repo(self.repo)
    self.solv_pool = solv.Pool()
    self.solv_repo = self.solv_pool.add_repo(str(self.channel_label or self.reponame))
    solv_path = os.path.join(self.repo.root, ZYPP_SOLV_CACHE_PATH,
                             self.channel_label or self.reponame, 'solv')
    if not os.path.isfile(solv_path) or not self.solv_repo.add_solv(solv.xfopen(str(solv_path)), 0):
        raise SolvFileNotFound(solv_path)
    self.solv_pool.addfileprovides()
    self.solv_pool.createwhatprovides()
    # Solvables with ":" in name are not packages
    return [pack for pack in self.solv_repo.solvables if ':' not in pack.name]

def prepare_pool(self, arch, ignore_conflicts):
    pool = solv.Pool()
    # the i586 DVD is really an i686 one
    if arch == 'i586':
        pool.setarch('i686')
    else:
        pool.setarch(arch)

    self.lockjobs[arch] = []
    solvables = set()

    for project, reponame in self.repos:
        repo = pool.add_repo(project)
        # check back the repo state to avoid surprises
        state = repository_arch_state(self.apiurl, project, reponame, arch)
        if state is None:
            continue
        s = f'repo-{project}-{reponame}-{arch}-{state}.solv'
        if not repo.add_solv(s):
            raise MismatchedRepoException(
                'failed to add repo {}/{}/{}'.format(project, reponame, arch))
        for solvable in repo.solvables_iter():
            if ignore_conflicts:
                solvable.unset(solv.SOLVABLE_CONFLICTS)
                solvable.unset(solv.SOLVABLE_OBSOLETES)
            # only take the first solvable in the repo chain
            if not self.use_newest_version and solvable.name in solvables:
                self.lockjobs[arch].append(
                    pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id))
            solvables.add(solvable.name)

    pool.addfileprovides()
    pool.createwhatprovides()

    for locale in self.locales:
        pool.set_namespaceproviders(solv.NAMESPACE_LANGUAGE, pool.Dep(locale), True)

    return pool

def merge_susetags(output, files):
    pool = solv.Pool()
    pool.setarch()

    for file in files:
        file_utils.add_susetags(pool, file)

    packages = dict()
    for s in pool.solvables_iter():
        evr = s.evr.split('-')
        release = evr.pop()
        version = '-'.join(evr)
        key = s.name + "-" + version + "." + s.arch
        if re.search('-release', s.name):
            # just take one version of it
            key = s.name + "." + s.arch
        packages[key] = {
            'name': s.name,
            'version': version,
            'arch': s.arch,
            'release': release,
            'provides': set()
        }
        for dep in s.lookup_deparray(solv.SOLVABLE_PROVIDES):
            packages[key]['provides'].add(str(dep))

    output_file = open(output, 'w')
    print("=Ver: 2.0", file=output_file)
    for package in sorted(packages):
        infos = packages[package]
        print('=Pkg:', infos['name'], infos['version'], infos['release'], infos['arch'], file=output_file)
        print('+Prv:', file=output_file)
        for dep in sorted(infos['provides']):
            print(dep, file=output_file)
        print('-Prv:', file=output_file)

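# Example call for merge_susetags() above (paths are illustrative;
# file_utils.add_susetags handles the decompression): merge several
# susetags dumps into a single packages file, keeping each name/version
# only once.
import glob
merge_susetags('15.1_and_before.packages', sorted(glob.glob('15.1_*.packages.xz')))
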
def list_packages(self, filters, latest):
    """
    List available packages.

    :returns: list
    """
    if not self.repo.is_configured:
        self.setup_repo(self.repo)

    pool = solv.Pool()
    repo = pool.add_repo(str(self.channel_label or self.reponame))

    solv_path = os.path.join(self.repo.root, ZYPP_SOLV_CACHE_PATH,
                             self.channel_label or self.reponame, 'solv')
    if not os.path.isfile(solv_path) or not repo.add_solv(solv.xfopen(str(solv_path)), 0):
        raise SolvFileNotFound(solv_path)

    # TODO: Implement latest
    # if latest:
    #     pkglist = pkglist.returnNewestByNameArch()
    # TODO: Implement sort
    # pkglist.sort(self._sort_packages)

    to_return = []
    for pack in repo.solvables:
        # Solvables with ":" in name are not packages
        if ':' in pack.name:
            continue
        new_pack = ContentPackage()
        epoch, version, release = RawSolvablePackage._parse_solvable_evr(pack.evr)
        new_pack.setNVREA(pack.name, version, release, epoch, pack.arch)
        new_pack.unique_id = RawSolvablePackage(pack)
        checksum = pack.lookup_checksum(solv.SOLVABLE_CHECKSUM)
        new_pack.checksum_type = checksum.typestr()
        new_pack.checksum = checksum.hex()
        to_return.append(new_pack)

    self.num_packages = len(to_return)
    return to_return

def drops_for_repo(drops, filename):
    pool = solv.Pool()
    pool.setarch()

    facrepo = pool.add_repo("oss")
    facrepo.add_solv(sys.argv[1])
    nonossrepo = pool.add_repo("non-oss")
    nonossrepo.add_solv(sys.argv[2])
    sysrepo = pool.add_repo(filename)
    sysrepo.add_solv(filename)

    pool.createwhatprovides()

    for s in sysrepo.solvables:
        haveit = False
        for s2 in pool.whatprovides(s.nameid):
            if s2.repo == sysrepo or s.nameid != s2.nameid:
                continue
            haveit = True
        if haveit:
            continue
        nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
        for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
            if s2.repo == sysrepo:
                continue
            haveit = True
        if haveit:
            continue
        if s.name not in drops:
            drops[s.name] = sysrepo.name

    # mark it explicitly to avoid having 2 pools while GC is not run
    del pool

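# Sketch of a driver loop for drops_for_repo() above, following its
# sys.argv layout (argv[1] = oss solv, argv[2] = non-oss solv, the rest
# are old system repos): accumulate dropped package names, then print
# them with the repo they were last seen in.
import sys
drops = dict()
for filename in sys.argv[3:]:
    drops_for_repo(drops, filename)
for name in sorted(drops):
    print(drops[name], name)
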
def create_weakremovers(self, target, target_config, directory, output):
    drops = dict()
    dropped_repos = dict()

    root = yaml.safe_load(open(os.path.join(directory, 'config.yml')))
    for item in root:
        key = list(item)[0]
        opts = item[key]
        # cast 15.1 to string :)
        key = str(key)
        oldrepos = set(glob.glob(os.path.join(directory, '{}_*.packages.xz'.format(key))))
        oldrepos |= set(glob.glob(os.path.join(directory, '{}.packages.xz'.format(key))))
        for oldrepo in sorted(oldrepos):
            pool = solv.Pool()
            pool.setarch()

            # we need some progress in the debug output - or gocd gets nervous
            self.logger.debug('checking {}'.format(oldrepo))
            oldsysrepo = pool.add_repo(oldrepo)
            defvendorid = oldsysrepo.meta.lookup_id(solv.SUSETAGS_DEFAULTVENDOR)
            f = tempfile.TemporaryFile()
            # FIXME: port to lzma module with python3
            st = subprocess.call(['xz', '-cd', oldrepo], stdout=f.fileno())
            os.lseek(f.fileno(), 0, os.SEEK_SET)
            oldsysrepo.add_susetags(solv.xfopen_fd(None, f.fileno()), defvendorid, None,
                                    solv.Repo.REPO_NO_INTERNALIZE | solv.Repo.SUSETAGS_RECORD_SHARES)

            for arch in self.all_architectures:
                for project, repo in self.repos:
                    fn = os.path.join(CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
                    r = pool.add_repo('/'.join([project, repo]))
                    r.add_solv(fn)

            pool.createwhatprovides()

            for s in oldsysrepo.solvables_iter():
                if s.arch == 'src':
                    continue
                oldarch = s.arch
                if oldarch == 'i686':
                    oldarch = 'i586'
                # print('check', s.name, oldarch)
                haveit = False
                for s2 in pool.whatprovides(s.nameid):
                    if s2.repo == oldsysrepo or s.nameid != s2.nameid:
                        continue
                    newarch = s2.arch
                    if newarch == 'i686':
                        newarch = 'i586'
                    if oldarch != newarch and newarch != 'noarch' and oldarch != 'noarch':
                        continue
                    haveit = True
                    break
                if haveit:
                    continue
                # check for already obsoleted packages
                nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
                for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
                    if s2.repo == oldsysrepo:
                        continue
                    haveit = True
                    break
                if haveit:
                    continue
                drops.setdefault(s.name, {'repo': key, 'archs': set()})
                if oldarch == 'noarch':
                    drops[s.name]['archs'] |= set(self.all_architectures)
                else:
                    drops[s.name]['archs'].add(oldarch)
                dropped_repos[key] = 1

            del pool

    for repo in sorted(dropped_repos):
        repo_output = False
        exclusives = dict()
        for name in sorted(drops):
            if drops[name]['repo'] != repo:
                continue
            if len(drops[name]['archs']) == len(self.all_architectures):
                if not repo_output:
                    print('#', repo, file=output)
                    repo_output = True
                print('Provides: weakremover({})'.format(name), file=output)
            else:
                jarch = ' '.join(sorted(drops[name]['archs']))
                exclusives.setdefault(jarch, []).append(name)
        for arch in sorted(exclusives):
            if not repo_output:
                print('#', repo, file=output)
                repo_output = True
            print('%ifarch {}'.format(arch), file=output)
            for name in sorted(exclusives[arch]):
                print('Provides: weakremover({})'.format(name), file=output)
            print('%endif', file=output)
    output.flush()

#!/usr/bin/python2
# check if all packages in a repo are newer than all other repos

from __future__ import print_function

import sys
import os
import re

import solv

pool = solv.Pool()

args = sys.argv[1:]
if len(args) < 2:
    print("usage: checknewer NEWREPO OLDREPO1 [OLDREPO2...]")
    sys.exit(1)

firstrepo = None
for arg in args:
    argf = solv.xfopen(arg)
    repo = pool.add_repo(arg)
    if not firstrepo:
        firstrepo = repo
    if re.search(r'solv$', arg):
        repo.add_solv(argf)
    elif re.search(r'primary\.xml', arg):
        repo.add_rpmmd(argf, None)
    elif re.search(r'packages', arg):
        repo.add_susetags(argf, 0, None)
    else:
        print("%s: unknown repo type" % (arg))
        sys.exit(1)

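# Hedged sketch of how this script presumably continues (assumption:
# pool.evrcmp() is available in these libsolv bindings for comparing EVR
# ids): flag any package in the first (new) repo that is not newer than a
# same-named package from one of the old repos.
pool.createwhatprovides()
for s in firstrepo.solvables:
    for s2 in pool.whatprovides(s.nameid):
        if s2.repo is firstrepo or s2.nameid != s.nameid:
            continue
        if pool.evrcmp(s.evrid, s2.evrid) <= 0:
            print("%s is not newer than %s from %s" % (s, s2, s2.repo.name))
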
def create_weakremovers(self, target, target_config, directory, output):
    drops = dict()
    dropped_repos = dict()

    root = yaml.safe_load(open(os.path.join(directory, 'config.yml')))
    for item in root:
        key = list(item)[0]
        # cast 15.1 to string :)
        key = str(key)
        oldrepos = set()
        for suffix in ['xz', 'zst']:
            oldrepos |= set(glob.glob(os.path.join(directory, f"{key}_*.packages.{suffix}")))
            oldrepos |= set(glob.glob(os.path.join(directory, f"{key}.packages.{suffix}")))
        for oldrepo in sorted(oldrepos):
            pool = solv.Pool()
            pool.setarch()

            # we need some progress in the debug output - or gocd gets nervous
            self.logger.debug('checking {}'.format(oldrepo))
            oldsysrepo = file_utils.add_susetags(pool, oldrepo)

            for arch in self.all_architectures:
                for project, repo in self.repos:
                    # check back the repo state to avoid surprises
                    state = repository_arch_state(self.apiurl, project, repo, arch)
                    if state is None:
                        self.logger.debug(f'Skipping {project}/{repo}/{arch}')
                        continue
                    fn = f'repo-{project}-{repo}-{arch}-{state}.solv'
                    r = pool.add_repo('/'.join([project, repo]))
                    if not r.add_solv(fn):
                        raise MismatchedRepoException(
                            'failed to add repo {}/{}/{}.'.format(project, repo, arch))

            pool.createwhatprovides()

            accepted_archs = set(self.all_architectures)
            accepted_archs.add('noarch')

            for s in oldsysrepo.solvables_iter():
                oldarch = s.arch
                if oldarch == 'i686':
                    oldarch = 'i586'
                if oldarch not in accepted_archs:
                    continue
                haveit = False
                for s2 in pool.whatprovides(s.nameid):
                    if s2.repo == oldsysrepo or s.nameid != s2.nameid:
                        continue
                    newarch = s2.arch
                    if newarch == 'i686':
                        newarch = 'i586'
                    if oldarch != newarch and newarch != 'noarch' and oldarch != 'noarch':
                        continue
                    haveit = True
                    break
                if haveit:
                    continue
                # check for already obsoleted packages
                nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
                for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
                    if s2.repo == oldsysrepo:
                        continue
                    haveit = True
                    break
                if haveit:
                    continue
                if s.name not in drops:
                    drops[s.name] = {'repo': key, 'archs': set()}
                if oldarch == 'noarch':
                    drops[s.name]['archs'] |= set(self.all_architectures)
                else:
                    drops[s.name]['archs'].add(oldarch)
                dropped_repos[key] = 1

            del pool

    for repo in sorted(dropped_repos):
        repo_output = False
        exclusives = dict()
        for name in sorted(drops):
            if drops[name]['repo'] != repo:
                continue
            if drops[name]['archs'] == set(self.all_architectures):
                if not repo_output:
                    print('#', repo, file=output)
                    repo_output = True
                print('Provides: weakremover({})'.format(name), file=output)
            else:
                jarch = ' '.join(sorted(drops[name]['archs']))
                exclusives.setdefault(jarch, []).append(name)
        for arch in sorted(exclusives):
            if not repo_output:
                print('#', repo, file=output)
                repo_output = True
            print('%ifarch {}'.format(arch), file=output)
            for name in sorted(exclusives[arch]):
                print('Provides: weakremover({})'.format(name), file=output)
            print('%endif', file=output)
    output.flush()

def main():
    parser = argparse.ArgumentParser(description="RPM cli dependency solver")
    parser.add_argument('--repodir', default='/etc/yum.repos.d/', type=dir_path,
                        dest='repodir', help='repository directory')
    parser.add_argument('--basearch', default="x86_64", type=str,
                        help="Base architecture")
    parser.add_argument('--releasever', default="", type=str,
                        help="Release version")
    parser.add_argument('--output', default="./",
                        help="Directory to use for json export")
    parser.add_argument('packages', type=str, nargs='+',
                        help='list of packages or solvable glob expression.\n'
                             'It accepts `repo:` and `selection:` prefixes.')
    parser.add_argument('--weak', action='store_true', default=False,
                        help="The solver tries to fulfill weak jobs, "
                             "but does not report a problem "
                             "if it is not possible to do so.")
    parser.add_argument('--reportupdateinfo', action='store_true', default=False,
                        help="Enable updateinfo report to json output")
    parser.add_argument('-v', '--verbose', action='count', default=0)
    args = parser.parse_args()

    level = logging.WARNING
    verbose = args.verbose
    if verbose == 1:
        level = logging.INFO
    elif verbose >= 2:
        level = logging.DEBUG

    root = logging.getLogger()
    root.setLevel(level)
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    root.addHandler(handler)

    logger.debug('Read argparse inputs')
    output = os.path.abspath(args.output)
    if os.path.isdir(output):
        output = os.path.join(output, 'data.json')
    export_dir = os.path.dirname(output)
    logger.debug('Check output file access: `{}` file'.format(output))
    output_exists = os.path.exists(output)
    if not output_exists and not os.access(export_dir, os.R_OK | os.W_OK | os.X_OK | os.F_OK):
        logger.error('Unable to write data output folder `{}`'.format(export_dir))
        exit(1)
    if output_exists \
            and os.path.isfile(output) \
            and not os.access(output, os.R_OK | os.W_OK):
        logger.error('Unable to write data output file `{}`'.format(output))
        exit(1)

    releasever = args.releasever
    if not releasever:
        # read local rpm
        # to retrieve system-release
        tmp = solv.Pool()
        sysrepo = repo_system('@System', 'system')
        sysrepo.load(tmp)
        tmp.createwhatprovides()
        release_sel = tmp.select('system-release', solv.Selection.SELECTION_PROVIDES)
        for s in release_sel.solvables():
            releasever = s.evr.split('-')[0]
            logger.debug('Read releasever {}'.format(releasever))
        tmp.free()

    # problems_class = interactive
    problems_class = MultiversionProblemSolver
    data_writer = data_json
    # action_solver = solv.Job.SOLVER_DISTUPGRADE
    # action_solver = solv.Job.SOLVER_UPDATE
    # use a fake install to force full rpm dependencies
    action_solver = solv.Job.SOLVER_INSTALL

    # read all repo configs
    repos = []
    reposdir = args.repodir
    basearch = args.basearch
    logger.info('Fetch repodata')
    for repo_file in sorted(glob.glob('%s/*.repo' % reposdir)):
        config = configparser.ConfigParser()
        config.read(repo_file)
        for section in config.sections():
            repoattr = {
                'enabled': 0,
                'priority': 99,
                'autorefresh': 1,
                'type': 'rpm',
                'metadata_expire': "900"
            }
            repoattr.update(config[section])
            if repoattr['type'] == 'rpm':
                repo = repo_repomd(section, 'repomd', repoattr,
                                   basearch=args.basearch, releasever=releasever)
                repos.append(repo)

    pool = solv.Pool()
    pool.setarch(args.basearch)
    pool.set_loadcallback(load_stub)

    # now load all enabled repos into the pool
    for repo in repos:
        if int(repo['enabled']):
            repo.load(pool)

    cmdlinerepo = None
    packages = []
    for arg in args.packages:
        if arg.endswith(".rpm") and os.access(arg, os.R_OK):
            if not cmdlinerepo:
                cmdlinerepo = repo_cmdline('@commandline', 'cmdline')
                cmdlinerepo.load(pool)
                cmdlinerepo['packages'] = {}
            s = cmdlinerepo.handle.add_rpm(arg, solv.Repo.REPO_REUSE_REPODATA | solv.Repo.REPO_NO_INTERNALIZE)
            if not s:
                print(pool.errstr)
                sys.exit(1)
            cmdlinerepo['packages'][arg] = s
        elif os.access(arg, os.R_OK):
            # read a list of packages from file
            with open(arg, 'r') as f:
                for a in f.readlines():
                    # remove comment from line
                    p = a.strip().split('#')[0]
                    if p:
                        packages.append(p)
        else:
            packages.append(arg)
    if cmdlinerepo:
        cmdlinerepo.handle.internalize()

    addedprovides = pool.addfileprovides_queue()
    if addedprovides:
        # sysrepo.updateaddedprovides(addedprovides)
        for repo in repos:
            repo.updateaddedprovides(addedprovides)
    pool.createwhatprovides()

    # FIXME: workaround to have fewer
    # conflicts to solve
    # this helps to keep as many packages
    # as possible in the data.json
    logger.debug('Remove SOLVABLE_CONFLICTS SOLVABLE_OBSOLETES from pool')
    for s in pool.solvables:
        s.unset(solv.SOLVABLE_CONFLICTS)
        s.unset(solv.SOLVABLE_OBSOLETES)
        # s.unset(solv.SOLVABLE_FILELIST)

    action_solver |= solv.Job.SOLVER_CLEANDEPS
    # action_solver |= solv.Job.SOLVER_FORCEBEST
    if args.weak:
        action_solver |= solv.Job.SOLVER_WEAK

    logger.info('Build job stack')
    # convert arguments into jobs
    js = JobSolver(pool, repos, action_solver)
    jobs = js.get_jobs_from_packages(packages)
    if not jobs:
        print("no package matched.")
        sys.exit(1)

    if verbose > 2:
        pool.set_debuglevel(verbose - 2)

    logger.info('Solv jobs')
    problem_solver = problems_class(pool)
    solver = problem_solver.run_problem_loop(jobs)

    # no problems, show transaction
    trans = solver.transaction()
    del solver
    if trans.isempty():
        print("Nothing to do.")
        sys.exit(0)

    print('')
    print("Transaction summary:")
    print('')
    for cl in trans.classify(solv.Transaction.SOLVER_TRANSACTION_SHOW_OBSOLETES |
                             solv.Transaction.SOLVER_TRANSACTION_OBSOLETE_IS_UPGRADE):
        if cl.type == solv.Transaction.SOLVER_TRANSACTION_ERASE:
            print("%d erased packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_INSTALL:
            print("%d installed packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_REINSTALLED:
            print("%d reinstalled packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_DOWNGRADED:
            print("%d downgraded packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_CHANGED:
            print("%d changed packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_UPGRADED:
            print("%d upgraded packages:" % cl.count)
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
            print("%d vendor changes from '%s' to '%s':" % (cl.count, cl.fromstr, cl.tostr))
        elif cl.type == solv.Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
            print("%d arch changes from '%s' to '%s':" % (cl.count, cl.fromstr, cl.tostr))
        else:
            continue

    print("install size change: %d K" % trans.calc_installsizechange())

    logger.info('Build data output')
    dw = data_writer(pool)
    updateinfo = args.reportupdateinfo
    data = dw.format(cl.solvables(), updateinfo=updateinfo)
    with open(output, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

def update_project(apiurl, project):
    # Cache dir specific to hostname and project.
    host = urlparse(apiurl).hostname
    cache_dir = CacheManager.directory('update_repo_handler', host, project)
    repo_dir = os.path.join(cache_dir, '000update-repos')

    # development aid
    checkout = True
    if checkout:
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
        os.makedirs(cache_dir)

        osc.core.checkout_package(apiurl, project, '000update-repos', expand_link=True, prj_dir=cache_dir)

    root = yaml.safe_load(open(os.path.join(repo_dir, 'config.yml')))
    for item in root:
        key = list(item)[0]
        opts = item[key]
        # cast 15.1 to string :)
        key = str(key)
        if not opts['url'].endswith('/'):
            opts['url'] += '/'

        if opts.get('refresh', False):
            opts['build'] = dump_solv_build(opts['url'])
            path = '{}_{}.packages'.format(key, opts['build'])
        else:
            path = key + '.packages'
        packages_file = os.path.join(repo_dir, path)

        if os.path.exists(packages_file + '.xz'):
            print(path, 'already exists')
            continue

        solv_file = packages_file + '.solv'
        dump_solv(solv_file, opts['url'])

        pool = solv.Pool()
        pool.setarch()

        if opts.get('refresh', False):
            for file in glob.glob(os.path.join(repo_dir, '{}_*.packages.xz'.format(key))):
                repo = pool.add_repo(file)
                defvendorid = repo.meta.lookup_id(solv.SUSETAGS_DEFAULTVENDOR)
                f = tempfile.TemporaryFile()
                # FIXME: port to lzma module with python3
                st = subprocess.call(['xz', '-cd', file], stdout=f.fileno())
                os.lseek(f.fileno(), 0, os.SEEK_SET)
                repo.add_susetags(solv.xfopen_fd(None, f.fileno()), defvendorid, None,
                                  solv.Repo.REPO_NO_INTERNALIZE | solv.Repo.SUSETAGS_RECORD_SHARES)

        repo1 = pool.add_repo(''.join(random.choice(string.ascii_letters) for _ in range(5)))
        repo1.add_solv(solv_file)

        print_repo_delta(pool, repo1, open(packages_file, 'w'))
        subprocess.call(['xz', '-9', packages_file])
        os.unlink(solv_file)

        url = osc.core.makeurl(apiurl, ['source', project, '000update-repos', path + '.xz'])
        osc.core.http_PUT(url, data=open(packages_file + '.xz', 'rb').read())

        del pool

def do_create_droplist(self, subcmd, opts, *oldsolv):
    """${cmd_name}: generate list of obsolete packages

    The globally specified repositories are taken as the current
    package set. All solv files specified on the command line
    are old versions of those repos.

    The command outputs all package names that are no longer
    contained in or provided by the current repos.

    ${cmd_usage}
    ${cmd_option_list}
    """
    drops = dict()

    for arch in self.tool.architectures:
        for old in oldsolv:
            logger.debug("%s: processing %s", arch, old)

            pool = solv.Pool()
            pool.setarch(arch)

            for prp in self.tool.repos:
                project, repo = prp.split('/')
                fn = os.path.join(CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
                r = pool.add_repo(prp)
                r.add_solv(fn)

            sysrepo = pool.add_repo(os.path.basename(old).replace('.repo.solv', ''))
            sysrepo.add_solv(old)

            pool.createwhatprovides()

            for s in sysrepo.solvables:
                haveit = False
                for s2 in pool.whatprovides(s.nameid):
                    if s2.repo == sysrepo or s.nameid != s2.nameid:
                        continue
                    haveit = True
                if haveit:
                    continue
                nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
                for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
                    if s2.repo == sysrepo:
                        continue
                    haveit = True
                if haveit:
                    continue
                if s.name not in drops:
                    drops[s.name] = sysrepo.name

            # mark it explicitly to avoid having 2 pools while GC is not run
            del pool

    for reponame in sorted(set(drops.values())):
        print("<!-- %s -->" % reponame)
        for p in sorted(drops):
            if drops[p] != reponame:
                continue
            print("  <obsoletepackage>%s</obsoletepackage>" % p)

def update_project(apiurl, project, fixate=None):
    # Cache dir specific to hostname and project.
    host = urlparse(apiurl).hostname
    cache_dir = CacheManager.directory('update_repo_handler', host, project)
    repo_dir = os.path.join(cache_dir, '000update-repos')

    # development aid
    checkout = True
    if checkout:
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
        os.makedirs(cache_dir)

        osc.core.checkout_package(apiurl, project, '000update-repos', expand_link=True, prj_dir=cache_dir)

    package = osc.core.Package(repo_dir)

    root = yaml.safe_load(open(os.path.join(repo_dir, 'config.yml')))
    if fixate:
        return fixate_target(root, package, fixate)

    for item in root:
        key = list(item)[0]
        opts = item[key]
        # cast 15.1 to string :)
        key = str(key)
        if not opts['url'].endswith('/'):
            opts['url'] += '/'

        if opts.get('refresh', False):
            opts['build'] = dump_solv_build(opts['url'])
            path = '{}_{}.packages'.format(key, opts['build'])
        else:
            path = key + '.packages'
        packages_file = os.path.join(repo_dir, path)

        if opts.get('refresh', False):
            oldfiles = target_files(repo_dir, key)
            if len(oldfiles) > 10:
                oldest = oldfiles[-1]
                if oldest.count('and_before') > 1:
                    raise Exception('The oldest is already a compacted file')
                oldest = oldest.replace('.packages.xz', '_and_before.packages')
                oldest = oldest.replace('.packages.zst', '_and_before.packages')
                merge_susetags(oldest, oldfiles)
                for file in oldfiles:
                    os.unlink(file)
                    package.delete_file(os.path.basename(file))
                subprocess.check_call(['zstd', '-19', '--rm', oldest])
                package.addfile(os.path.basename(oldest) + ".zst")

        if os.path.exists(packages_file + '.zst') or os.path.exists(packages_file + '.xz'):
            print(path, 'already exists')
            continue

        solv_file = packages_file + '.solv'
        dump_solv(solv_file, opts['url'])

        pool = solv.Pool()
        pool.setarch()

        if opts.get('refresh', False):
            for file in target_files(repo_dir, key):
                file_utils.add_susetags(pool, file)

        repo1 = pool.add_repo(''.join(random.choice(string.ascii_letters) for _ in range(5)))
        repo1.add_solv(solv_file)

        print_repo_delta(pool, repo1, open(packages_file, 'w'))
        subprocess.call(['zstd', '-19', '--rm', packages_file])
        os.unlink(solv_file)

        package.addfile(os.path.basename(path + '.zst'))

        del pool

    package.commit('Automatic update')