def _get_cache_dir(*args, **kwargs):
    """Return a per-user temporary yum/dnf cache directory path.

    Dispatches to yum's ``getCacheDir`` on CentOS 7 and to dnf's
    yum-compatibility shim everywhere else.  All positional and keyword
    arguments are forwarded unchanged to the underlying implementation.
    """
    if test_utils.on_centos(7):
        from yum.misc import getCacheDir
    else:
        # dnf ships a yum-compatible getCacheDir under dnf.yum.misc.
        from dnf.yum.misc import getCacheDir
    # Bug fix: the dnf branch previously called getCacheDir() with no
    # arguments, silently dropping caller options such as a custom tmpdir
    # or reuse=False and so returning a different cachedir than requested.
    # Forward the arguments on both paths.
    return getCacheDir(*args, **kwargs)
def main(options, args):
    """Entry point: configure yum and select the repos named in *args*.

    options -- parsed CLI options (expects .days, .config, .tempcache)
    args    -- list of repo ids to restrict the operation to (empty = all)
    """
    # NOTE(review): `days` is not used in this visible portion of the
    # function; presumably consumed further down in the original script.
    days = options.days
    repoids = args
    my = YumQuiet()
    # Load yum configuration, honouring an explicit -c/--config file.
    if options.config:
        my.doConfigSetup(init_plugins=False, fn=options.config)
    else:
        my.doConfigSetup(init_plugins=False)
    # Unprivileged users cannot write the system cache: fall back to a
    # per-user temporary cachedir (also used when --tempcache is given).
    if os.geteuid() != 0 or options.tempcache:
        cachedir = getCacheDir()
        if cachedir is None:
            print "Error: Could not make cachedir, exiting"
            sys.exit(50)
        my.repos.setCacheDir(cachedir)
    # If specific repo ids were requested, enable exactly those and
    # disable everything else.
    if len(repoids) > 0:
        for repo in my.repos.repos.values():
            if repo.id not in repoids:
                repo.disable()
            else:
                repo.enable()
    try:
        my._getRepos()
    except yum.Errors.RepoError, e:
        # Repo setup failed (bad config, unreachable mirror, ...): report
        # on stderr and abort.
        print >> sys.stderr, '%s' % e
        print 'Cannot continue'
        sys.exit(1)
def check_repo_closure():
    """Find reposync config file(s) and check repoclosure against the
    internal repo with the repos in the config(s) as lookaside repos.

    Raises SkipTest when OST_SKIP_SYNC is set or when the repoclosure
    subprocess fails; raises RuntimeError when no config file is found.
    """
    if os.getenv('OST_SKIP_SYNC', False):
        raise SkipTest('OST_SKIP_SYNC is set, skipping repo closure check')
    # NOTE(review): assumes the SUITE environment variable is set;
    # os.environ.get('SUITE') returning None would make os.path.join fail.
    configs = glob.glob(
        os.path.join(os.environ.get('SUITE'), '*reposync*.repo')
    )
    if not configs:
        raise RuntimeError("Could not find reposync config file.")
    for config in configs:
        # Fresh (reuse=False) per-run cache dir so runs don't interfere.
        tmp_cache_dir = getCacheDir('/var/tmp/', reuse=False)
        command = reposync_config_file(config, tmp_cache_dir)
        try:
            subprocess.check_output(command, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            err_msg = ("\n"
                       "## Params: {com}.\n"
                       "## Exist status: {es}\n"
                       "## Output: {out}\n\n"
                       ).format(com=e.cmd, es=e.returncode, out=e.output,)
            # Deliberately downgraded from a hard failure to a skip.
            raise SkipTest(err_msg)
            #raise RuntimeError(err_msg)
        finally:
            # Always drop the temporary cache, even on failure.
            clean_tmp_cache(tmp_cache_dir)
def __init__(self, repolist=None, yumbase=None, mdconf=None, mdbase_class=None):
    """Set up the repo-merge helper.

    repolist     -- list of repositories to merge (default: new empty list)
    yumbase      -- optional pre-built yum.YumBase instance
    mdconf       -- optional createrepo.MetaDataConfig
    mdbase_class -- optional createrepo.MetaDataGenerator class
    """
    # Bug fix: the old signature used a mutable default (repolist=[]),
    # which is a single list object shared by every instance constructed
    # without an explicit repolist.  Use None as a sentinel instead;
    # callers passing a list (or nothing) see identical behaviour.
    if repolist is None:
        repolist = []
    self.repolist = repolist
    self.outputdir = '%s/merged_repo' % os.getcwd()
    self.exclude_tuples = []
    self.sort_func = self._sort_func  # callback function to magically sort pkgs
    if not mdconf:
        self.mdconf = createrepo.MetaDataConfig()
    else:
        self.mdconf = mdconf
    if not mdbase_class:
        self.mdbase_class = createrepo.MetaDataGenerator
    else:
        self.mdbase_class = mdbase_class
    if not yumbase:
        self.yumbase = yum.YumBase()
    else:
        self.yumbase = yumbase
    # Per-user cachedir so the merge can run unprivileged; caching off.
    self.yumbase.conf.cachedir = getCacheDir()
    self.yumbase.conf.cache = 0
    # default to all arches
    self.archlist = unique(rpmUtils.arch.arches.keys() +
                           rpmUtils.arch.arches.values())
    self.groups = True
    self.updateinfo = True
def check_repo_closure():
    """Find reposync config file(s) and check repoclosure against the
    internal repo with the repos in the config(s) as lookaside repos.

    Raises SkipTest when SKIP_SYNC is set or when the repoclosure
    subprocess fails; raises RuntimeError when no config file is found.
    """
    if os.getenv('SKIP_SYNC', False):
        raise SkipTest('SKIP_SYNC is set, skipping repo closure check')
    # NOTE(review): assumes the SUITE environment variable is set;
    # os.environ.get('SUITE') returning None would make os.path.join fail.
    configs = glob.glob(
        os.path.join(os.environ.get('SUITE'), '*reposync*.repo'))
    if not configs:
        raise RuntimeError("Could not find reposync config file.")
    for config in configs:
        # Fresh (reuse=False) per-run cache dir so runs don't interfere.
        tmp_cache_dir = getCacheDir('/var/tmp/', reuse=False)
        command = reposync_config_file(config, tmp_cache_dir)
        try:
            subprocess.check_output(command, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            err_msg = ("\n"
                       "## Params: {com}.\n"
                       "## Exist status: {es}\n"
                       "## Output: {out}\n\n").format(
                           com=e.cmd,
                           es=e.returncode,
                           out=e.output,
                       )
            # Deliberately downgraded from a hard failure to a skip.
            raise SkipTest(err_msg)
            #raise RuntimeError(err_msg)
        finally:
            # Always drop the temporary cache, even on failure.
            clean_tmp_cache(tmp_cache_dir)
def main():
    """Entry point for the repoclosure CLI: build the RepoClosure object,
    wire up any ad-hoc repos given on the command line, select/deselect
    repos, and read repository metadata."""
    (opts, cruft) = parseArgs()
    my = RepoClosure(arch=opts.arch, config=opts.config,
                     builddeps=opts.builddeps, pkgonly=opts.pkg,
                     grouponly=opts.group, basearch=opts.basearch)
    if opts.repofrompath:
        # setup the fake repos: each --repofrompath is "repoid,path-or-url"
        for repo in opts.repofrompath:
            repoid, repopath = tuple(repo.split(','))
            # Bare local paths are turned into file:// baseurls.
            if repopath.startswith('http') or repopath.startswith('ftp') or repopath.startswith('file:'):
                baseurl = repopath
            else:
                repopath = os.path.abspath(repopath)
                baseurl = 'file://' + repopath
            newrepo = yum.yumRepo.YumRepository(repoid)
            newrepo.name = repopath
            newrepo.baseurl = baseurl
            newrepo.basecachedir = my.conf.cachedir
            # Never trust cached metadata for ad-hoc repos.
            newrepo.metadata_expire = 0
            newrepo.timestamp_check = False
            my.repos.add(newrepo)
            my.repos.enableRepo(newrepo.id)
            my.logger.info("Added %s repo from %s" % (repoid, repopath))
    all_repoids = [r.id for r in my.repos.repos.values()]
    lookaside_repos = filter_repos(opts.lookaside, all_repoids)
    # --repoid restricts the closure check to the named repos (plus any
    # lookaside repos, which supply deps but are not themselves checked).
    if opts.repoid:
        specified_repos = filter_repos(opts.repoid, all_repoids)
        for repo in my.repos.repos.values():
            if repo.id in specified_repos + lookaside_repos:
                repo.enable()
            else:
                repo.disable()
    if lookaside_repos:
        my.lookaside = lookaside_repos
    # Unprivileged users get a private temporary cachedir.
    if os.geteuid() != 0 or opts.tempcache:
        cachedir = getCacheDir()
        if cachedir is None:
            my.logger.error("Error: Could not make cachedir, exiting")
            sys.exit(50)
        my.repos.setCacheDir(cachedir)
    if not opts.quiet:
        my.logger.info('Reading in repository metadata - please wait....')
    try:
        my.readMetadata()
    except yum.Errors.RepoError, e:
        # Non-fatal: continue with whatever metadata was readable.
        my.logger.info(e)
        my.logger.info('Some dependencies may not be complete for this repository')
        my.logger.info('Run as root to get all dependencies or use -t to enable a user temp cache')
def doit(dir, treename, mail=True, testing=False):
    """Run a broken-dependency (repoclosure) report for every arch
    subdirectory of *dir* and mail the results per source package.

    dir      -- tree root whose immediate subdirs are arch names
    treename -- human-readable tree label used in reports/config
    mail     -- whether generateSpam should actually send mail
    testing  -- passed through to generateConfig (testing repos)
    """
    for arch in os.listdir(dir):
        conffile = generateConfig(dir, treename, arch, testing)
        if not conffile:
            continue
        # Map the base arch to the "most capable" compat arch so that
        # packages built for the wider arch are resolvable too.
        if arch == 'i386':
            carch = 'i686'
        elif arch == 'ppc':
            carch = 'ppc64'
        elif arch == 'sparc':
            carch = 'sparc64v'
        else:
            carch = arch
        my = repoclosure.RepoClosure(config = conffile, arch = [carch])
        # Private cachedir; removed again at the end of each iteration.
        cachedir = getCacheDir()
        my.repos.setCacheDir(cachedir)
        my.readMetadata()
        baddeps = my.getBrokenDeps(newest = False)
        pkgs = baddeps.keys()
        # Sort packages by name (decorate-sort-undecorate, py2 style).
        tmplist = [(x.returnSimple('name'), x) for x in pkgs]
        tmplist.sort()
        pkgs = [x for (key, x) in tmplist]
        if len(pkgs) > 0:
            print "Broken deps for %s" % (arch,)
            print "----------------------------------------------------------"
        for pkg in pkgs:
            # Only report on EPEL packages.
            if not pkg.repoid.startswith('epel'):
                continue
            srcpkg = getSrcPkg(pkg)
            addOwner(owners, srcpkg)
            if not deps.has_key(srcpkg):
                deps[srcpkg] = {}
            pkgid = "%s-%s" % (pkg.name, pkg.printVer())
            if not deps[srcpkg].has_key(pkgid):
                deps[srcpkg][pkgid] = {}
            broken = []
            for (n, f, v) in baddeps[pkg]:
                print "\t%s" % printableReq(pkg, (n, f, v))
                # Record which owners are likely responsible for the dep.
                blamelist = assignBlame(my, n, owners)
                broken.append( (pkg, (n, f, v), blamelist) )
            # Accumulate into the module-level deps mapping, keyed
            # src package -> binary pkgid -> arch.
            deps[srcpkg][pkgid][arch] = broken
        print "\n\n"
        os.unlink(conffile)
        shutil.rmtree(cachedir, ignore_errors = True)
    # One report mail per source package.
    pkglist = deps.keys()
    for pkg in pkglist:
        generateSpam(pkg, treename, mail)
def doit(dir, treename, mail=True, testing=False):
    """Run a broken-dependency (repoclosure) report for every arch
    subdirectory of *dir* and mail the results per source package.

    dir      -- tree root whose immediate subdirs are arch names
    treename -- human-readable tree label used in reports/config
    mail     -- whether generateSpam should actually send mail
    testing  -- passed through to generateConfig (testing repos)
    """
    for arch in os.listdir(dir):
        conffile = generateConfig(dir, treename, arch, testing)
        if not conffile:
            continue
        # Map the base arch to the "most capable" compat arch so that
        # packages built for the wider arch are resolvable too.
        if arch == 'i386':
            carch = 'i686'
        elif arch == 'ppc':
            carch = 'ppc64'
        elif arch == 'sparc':
            carch = 'sparc64v'
        else:
            carch = arch
        my = repoclosure.RepoClosure(config=conffile, arch=[carch])
        # Private cachedir; removed again at the end of each iteration.
        cachedir = getCacheDir()
        my.repos.setCacheDir(cachedir)
        my.readMetadata()
        baddeps = my.getBrokenDeps(newest=False)
        pkgs = baddeps.keys()
        # Sort packages by name (decorate-sort-undecorate, py2 style).
        tmplist = [(x.returnSimple('name'), x) for x in pkgs]
        tmplist.sort()
        pkgs = [x for (key, x) in tmplist]
        if len(pkgs) > 0:
            print "Broken deps for %s" % (arch, )
            print "----------------------------------------------------------"
        for pkg in pkgs:
            # Only report on EPEL packages.
            if not pkg.repoid.startswith('epel'):
                continue
            srcpkg = getSrcPkg(pkg)
            addOwner(owners, srcpkg)
            if not deps.has_key(srcpkg):
                deps[srcpkg] = {}
            pkgid = "%s-%s" % (pkg.name, pkg.printVer())
            if not deps[srcpkg].has_key(pkgid):
                deps[srcpkg][pkgid] = {}
            broken = []
            for (n, f, v) in baddeps[pkg]:
                print "\t%s" % printableReq(pkg, (n, f, v))
                # Record which owners are likely responsible for the dep.
                blamelist = assignBlame(my, n, owners)
                broken.append((pkg, (n, f, v), blamelist))
            # Accumulate into the module-level deps mapping, keyed
            # src package -> binary pkgid -> arch.
            deps[srcpkg][pkgid][arch] = broken
        print "\n\n"
        os.unlink(conffile)
        shutil.rmtree(cachedir, ignore_errors=True)
    # One report mail per source package.
    pkglist = deps.keys()
    for pkg in pkglist:
        generateSpam(pkg, treename, mail)
def __init__(self, queue, bodhi_workers_queue, bodhi_workers_count, main_thread, parent=None):
    """Worker that loads package data through a private yum instance.

    queue               -- work queue this worker consumes
    bodhi_workers_queue -- queue shared with the bodhi worker pool
    bodhi_workers_count -- size of the bodhi worker pool
    main_thread         -- reference back to the owning main thread
    parent              -- optional parent object for the base class
    """
    super(PackagesWorker, self).__init__(parent)
    # Wire up the queues and threads this worker cooperates with.
    self.queue = queue
    self.bodhi_workers_queue = bodhi_workers_queue
    self.bodhi_workers_count = bodhi_workers_count
    self.main_thread = main_thread
    # Private yum instance; a per-user cache directory lets the worker
    # run without root privileges.
    self.yb = yum.YumBase()
    self.yb.repos.setCacheDir(getCacheDir())
    # RPM Transactions
    self.rpmTS = rpm.TransactionSet()
def main():
    """Entry point: build one solver per distribution release, load
    package ownership data, and read each dist's repository metadata."""
    (opts, cruft) = parseArgs()
    if opts.noowners:
        owners = {}
    else:
        # PackageOwners lives outside the normal path; import lazily.
        sys.path.append('/srv/extras-push/work/extras-repoclosure')
        from PackageOwners import PackageOwners
        owners = PackageOwners()
        #owners.FromCVS(workdir = ownersworkdir)
        if not owners.FromURL():
            sys.exit(1)
    solvers = {}
    for dist in dists:
        solver = MySolver(config=opts.config)
        # Enable only the repos whose id's digits match this dist
        # (e.g. "updates-7" -> "7").
        for repo in solver.repos.repos.values():
            if re.sub('\D+', '', repo.id) != dist:
                repo.disable()
            else:
                repo.enable()
        solvers[dist] = solver
    # Unprivileged users (or explicit flags) get a private cachedir.
    if os.geteuid() != 0 or opts.tempcache or opts.cachedir != '':
        if opts.cachedir != '':
            cachedir = opts.cachedir
        else:
            cachedir = getCacheDir()
        if cachedir is None:
            print "Error: Could not make cachedir, exiting"
            sys.exit(50)
        for repo in solvers.values():
            repo.repos.setCacheDir(cachedir)
    if not opts.quiet:
        print 'Reading in repository metadata - please wait....'
    # Python 2 .keys() returns a list, so deleting entries while
    # iterating over it here is safe.
    for dist in solvers.keys():
        try:
            solvers[dist].readMetadata()
        except yum.Errors.RepoError, e:
            # Drop dists whose metadata cannot be read instead of failing.
            print 'Metadata read error for dist %s, excluding it' % dist
            del solvers[dist]
def main():
    """Entry point: build one solver per distribution release, load
    package ownership data, and read each dist's repository metadata."""
    (opts, cruft) = parseArgs()
    if opts.noowners:
        owners = {}
    else:
        # PackageOwners lives outside the normal path; import lazily.
        sys.path.append('/srv/extras-push/work/extras-repoclosure')
        from PackageOwners import PackageOwners
        owners = PackageOwners()
        #owners.FromCVS(workdir = ownersworkdir)
        if not owners.FromURL():
            sys.exit(1)
    solvers = {}
    for dist in dists:
        solver = MySolver(config = opts.config)
        # Enable only the repos whose id's digits match this dist
        # (e.g. "updates-7" -> "7").
        for repo in solver.repos.repos.values():
            if re.sub('\D+', '', repo.id) != dist:
                repo.disable()
            else:
                repo.enable()
        solvers[dist] = solver
    # Unprivileged users (or explicit flags) get a private cachedir.
    if os.geteuid() != 0 or opts.tempcache or opts.cachedir != '':
        if opts.cachedir != '':
            cachedir = opts.cachedir
        else:
            cachedir = getCacheDir()
        if cachedir is None:
            print "Error: Could not make cachedir, exiting"
            sys.exit(50)
        for repo in solvers.values():
            repo.repos.setCacheDir(cachedir)
    if not opts.quiet:
        print 'Reading in repository metadata - please wait....'
    # Python 2 .keys() returns a list, so deleting entries while
    # iterating over it here is safe.
    for dist in solvers.keys():
        try:
            solvers[dist].readMetadata()
        except yum.Errors.RepoError, e:
            # Drop dists whose metadata cannot be read instead of failing.
            print 'Metadata read error for dist %s, excluding it' % dist
            del solvers[dist]
def __init__(self, callback=None):
    """Initialise the yum-backed package selector.

    callback -- optional progress callback; given access to the log and
                advanced between setup phases via next_task().

    On repo setup failure a GTK error dialog is shown and package
    selection is disabled rather than raising.
    """
    import tempfile
    yum.YumBase.__init__(self)
    if callback:
        callback.log = self.log
    self.repos.callback = callback
    # Set up a temporary root for RPM so it thinks there's nothing
    # installed, but don't use it until after we set up the config info.
    # We need to find the fedora-release package in the real root so
    # we can set $releasever and other variables from /etc/yum.repos.d/*
    # (#190999).
    self.temproot = tempfile.mkdtemp(dir="/tmp")
    self.doConfigSetup(init_plugins=False)
    # Unprivileged users need a private cache directory.
    if os.geteuid() != 0:
        cachedir = getCacheDir()
        if cachedir is None:
            self.errorlog("0, Error: Could not make cachedir, exiting")
            sys.exit(1)
        self.repos.setCacheDir(cachedir)
    if callback:
        callback.next_task()
    # Only now point the install root at the empty temp dir (see above).
    self.conf.installroot = self.temproot
    try:
        self.doTsSetup()
    except yum.Errors.RepoError, msg:
        # Surface the failure in the UI and continue with package
        # selection disabled instead of crashing the application.
        text = _(
            "Package selection is disabled due to an error in setup. Please fix your repository configuration and try again.\n\n%s"
        ) % msg
        dlg = gtk.MessageDialog(None, 0, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, text)
        dlg.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
        dlg.set_modal(True)
        rc = dlg.run()
        dlg.destroy()
        self.packagesEnabled = False
        return
def main(): my = YumDebugDump() # make yum-debug-dump work as non root user. if my.conf.uid != 0: cachedir = getCacheDir() if cachedir is None: my.logger.error("Error: Could not make cachedir, exiting") sys.exit(50) my.repos.setCacheDir(cachedir) # Turn off cache my.conf.cache = 0 # make sure the repos know about it, too my.repos.setCache(0) filename = None if my.args: filename = my.args[0] fn = my.create_debug_file(fn=filename) print "Output written to: %s" % fn
def __init__(self, repolist=None, yumbase=None, mdconf=None,
             mdbase_class=None):
    """Set up the repo-merge helper.

    repolist     -- list of repositories to merge (default: new empty list)
    yumbase      -- optional pre-built yum.YumBase instance
    mdconf       -- optional createrepo.MetaDataConfig
    mdbase_class -- optional createrepo.MetaDataGenerator class
    """
    # Bug fix: the old signature used a mutable default (repolist=[]),
    # which is a single list object shared by every instance constructed
    # without an explicit repolist.  Use None as a sentinel instead;
    # callers passing a list (or nothing) see identical behaviour.
    if repolist is None:
        repolist = []
    self.repolist = repolist
    self.outputdir = '%s/merged_repo' % os.getcwd()
    self.exclude_tuples = []
    self.sort_func = self._sort_func  # callback function to magically sort pkgs
    if not mdconf:
        self.mdconf = createrepo.MetaDataConfig()
    else:
        self.mdconf = mdconf
    if not mdbase_class:
        self.mdbase_class = createrepo.MetaDataGenerator
    else:
        self.mdbase_class = mdbase_class
    if not yumbase:
        self.yumbase = yum.YumBase()
    else:
        self.yumbase = yumbase
    # Per-user cachedir so the merge can run unprivileged; caching off.
    self.yumbase.conf.cachedir = getCacheDir()
    self.yumbase.conf.cache = 0
    # default to all arches
    self.archlist = unique(rpmUtils.arch.arches.keys() +
                           rpmUtils.arch.arches.values())
    self.groups = True
    self.updateinfo = True
def main():
    """Entry point for the repoclosure CLI: build the RepoClosure object,
    wire up any ad-hoc repos given on the command line, select/deselect
    repos, and read repository metadata."""
    (opts, cruft) = parseArgs()
    my = RepoClosure(arch=opts.arch, config=opts.config,
                     builddeps=opts.builddeps, pkgonly=opts.pkg,
                     grouponly=opts.group, basearch=opts.basearch)
    if opts.repofrompath:
        # setup the fake repos: each --repofrompath is "repoid,path-or-url"
        for repo in opts.repofrompath:
            repoid, repopath = tuple(repo.split(','))
            # Bare local paths are turned into file:// baseurls.
            if repopath.startswith('http') or repopath.startswith(
                    'ftp') or repopath.startswith('file:'):
                baseurl = repopath
            else:
                repopath = os.path.abspath(repopath)
                baseurl = 'file://' + repopath
            newrepo = yum.yumRepo.YumRepository(repoid)
            newrepo.name = repopath
            newrepo.baseurl = baseurl
            newrepo.basecachedir = my.conf.cachedir
            # Never trust cached metadata for ad-hoc repos.
            newrepo.metadata_expire = 0
            newrepo.timestamp_check = False
            my.repos.add(newrepo)
            my.repos.enableRepo(newrepo.id)
            my.logger.info("Added %s repo from %s" % (repoid, repopath))
    all_repoids = [r.id for r in my.repos.repos.values()]
    lookaside_repos = filter_repos(opts.lookaside, all_repoids)
    # --repoid restricts the closure check to the named repos (plus any
    # lookaside repos, which supply deps but are not themselves checked).
    if opts.repoid:
        specified_repos = filter_repos(opts.repoid, all_repoids)
        for repo in my.repos.repos.values():
            if repo.id in specified_repos + lookaside_repos:
                repo.enable()
            else:
                repo.disable()
    if lookaside_repos:
        my.lookaside = lookaside_repos
    # Unprivileged users get a private temporary cachedir.
    if os.geteuid() != 0 or opts.tempcache:
        cachedir = getCacheDir()
        if cachedir is None:
            my.logger.error("Error: Could not make cachedir, exiting")
            sys.exit(50)
        my.repos.setCacheDir(cachedir)
    if not opts.quiet:
        my.logger.info('Reading in repository metadata - please wait....')
    try:
        my.readMetadata()
    except yum.Errors.RepoError, e:
        # Non-fatal: continue with whatever metadata was readable.
        my.logger.info(e)
        my.logger.info(
            'Some dependencies may not be complete for this repository')
        my.logger.info(
            'Run as root to get all dependencies or use -t to enable a user temp cache'
        )
def doit(app, repotype, packages = [], removal = False):
    """ Wrapper function for everything.

    Runs a repoclosure broken-dependency check for every configured
    arch, accumulates results into the module-level DEPS mapping, and
    returns the generated report text ("" when nothing is broken,
    "baddep" when filterout() rejects the result set).

    app      -- application object providing config and logger
    repotype -- repository flavour passed through to config/report helpers
    packages -- optional packages to stage into a temporary repo
    removal  -- whether the temporary repo simulates a package removal
    """
    i_am_broken = False
    archs = app.config.get("repositories", "archs").split()
    for arch in archs:
        app.logger.info("Arch: " + arch)
        # When specific packages are given, stage them in a throw-away
        # repo so the closure check sees the proposed change.
        if packages != []:
            if removal:
                tmprepodir = create_tmp_repo(app, packages, arch, repotype)
            else:
                tmprepodir = create_tmp_repo(app, packages, arch)
        else:
            tmprepodir = ""
        conffile = generate_config(app, repotype, arch, tmprepodir, removal)
        if not conffile:
            continue
        # Map the base arch to the "most capable" compat arch.
        if arch == 'i386':
            carch = 'i686'
        elif arch == 'ppc':
            carch = 'ppc64'
        elif arch == 'sparc':
            carch = 'sparc64v'
        else:
            carch = arch
        app.logger.info("I will next calculate the dependency chains.")
        myrc = repoclosure.RepoClosure(config = conffile, arch = [carch])
        # Fresh cachedir per run (reuse=False); removed at loop end.
        cachedir = getCacheDir(reuse=False)
        myrc.repos.setCacheDir(cachedir)
        app.logger.info("Processing metadata returned from repoclosure...")
        myrc.readMetadata()
        app.logger.info("Now calculate the dependencies...")
        app.logger.info("It might take a minute or two.")
        baddeps = myrc.getBrokenDeps(newest = True)
        baddeps = filterout(app, baddeps)
        if baddeps == -1:
            return "baddep"
        pkgs = baddeps.keys()
        # Sort packages by name (decorate-sort-undecorate, py2 style).
        tmplist = [(x.returnSimple('name'), x) for x in pkgs]
        tmplist.sort()
        pkgs = [x for (key, x) in tmplist]
        if len(pkgs) > 0:
            i_am_broken = True
            app.logger.warning("Broken deps for %s" % (arch))
            app.logger.warning("----------------------------------------------")
        else:
            # NOTE(review): this log string was split across lines in the
            # damaged source; rejoined here -- confirm exact wording.
            app.logger.info("We are clean. No broken dependencies.")
        for pkg in pkgs:
            srcpkg = get_src_pkg(pkg)
            if not DEPS.has_key(srcpkg):
                DEPS[srcpkg] = {}
            pkgid = "%s-%s" % (pkg.name, pkg.printVer())
            if not DEPS[srcpkg].has_key(pkgid):
                DEPS[srcpkg][pkgid] = {}
            broken = []
            for (name, depflag, version) in baddeps[pkg]:
                app.logger.warning("\t%s" % printable_req(pkg, (name, depflag, version)))
                blamelist = assign_blame(myrc, name)
                broken.append( (pkg, (name, depflag, version), blamelist) )
            # src package -> binary pkgid -> arch -> broken dep records.
            DEPS[srcpkg][pkgid][arch] = broken
        # Per-iteration cleanup of temp repo, config and cache.
        if tmprepodir != "":
            app.logger.info("Clean up: removing the temporary repo dir: " + tmprepodir)
            shutil.rmtree(tmprepodir, ignore_errors = True)
        os.unlink(conffile)
        app.logger.info("Clean up: removing the cachedir: "+ cachedir)
        shutil.rmtree(cachedir, ignore_errors = True)
        app.logger.info("Done!\n\n")
    # Build the report text from everything accumulated in DEPS.
    pkglist = DEPS.keys()
    spamdata = ""
    for pkg in pkglist:
        spamdata += generate_spam(pkg, repotype)
    if i_am_broken == True:
        return spamdata
    else:
        return ""
def main():
    """Entry point for verifytree: verify that a local (file://) yum
    repository is internally consistent, optionally reporting results to
    a testopia run.  Returns a bitmask of BAD_* failure flags."""
    parser = OptionParser()
    # NOTE(review): the original multi-line usage string was collapsed in
    # the damaged source; line breaks reconstructed -- confirm wording.
    parser.usage = """
    verifytree - verify that a local yum repository is consistent

    verifytree /path/to/repo"""
    parser.add_option("-a","--checkall",action="store_true",default=False,
                      help="Check all packages in the repo")
    parser.add_option("--nocomps", "--nogroups",action="store_true", default=False,
                      help="Do not read and check comps")
    parser.add_option("--noplugins",action="store_true",default=False,
                      help="Do not load any plugins")
    parser.add_option("-t","--testopia",action="store",type="int",
                      help="Report results to the given testopia run number")
    parser.add_option("-r","--treeinfo", action="store_true", default=False,
                      help="check the checksums of listed files in a .treeinfo file, if available")
    opts, args = parser.parse_args()
    if not args:
        print "Must provide a file url to the repo"
        sys.exit(1)
    # FIXME: check that "args" is a valid dir before proceeding
    # (exists, isdir, contains .treeinfo, etc)
    url = args[0]
    if url[0] == '/':
        url = 'file://' + url
    s = urlparse.urlsplit(url)[0]
    h,d = urlparse.urlsplit(url)[1:3]
    if s != 'file':
        print "Must be a file:// url or you will not like this"
        sys.exit(1)
    # Derive a filesystem-safe repo id from host + path.
    repoid = '%s/%s' % (h, d)
    repoid = repoid.replace('/', '_')
    # Bad things happen if we're missing a trailing slash here
    if url[-1] != '/':
        url += '/'
    basedir = url.replace('file://', '')  # for a normal path thing
    my = yum.YumBase()
    if opts.noplugins:
        my.preconf.init_plugins = False
    my.conf.cachedir = getCacheDir()
    # Only our synthetic repo should be active.
    my.repos.disableRepo('*')
    newrepo = yum.yumRepo.YumRepository(repoid)
    newrepo.name = repoid
    newrepo.baseurl = [url]
    newrepo.basecachedir = my.conf.cachedir
    newrepo.metadata_expire = 0
    newrepo.timestamp_check = False
    newrepo.enablegroups = 1
    # we want *all* metadata
    newrepo.mdpolicy = 'group:all'
    # add our new repo
    my.repos.add(newrepo)
    # enable that repo
    my.repos.enableRepo(repoid)
    # setup the repo dirs/etc
    my.doRepoSetup(thisrepo=repoid)
    # Initialize results and reporting
    retval = 0
    if opts.testopia:
        run_id = testopia_create_run(opts.testopia)
        report = lambda case,result: testopia_report(run_id,case,result)
    else:
        # No-op reporter keeps the call sites uniform.
        report = lambda case,result: None
    # Check the metadata
    print "Checking repodata:"
    try:
        md_types = newrepo.repoXML.fileTypes()
        print " verifying repomd.xml with yum"
    except yum.Errors.RepoError:
        # Without repomd.xml nothing else can be checked.
        print " failed to load repomd.xml."
        report('REPODATA','FAILED')
        report('CORE_PACKAGES','BLOCKED')
        report('COMPS','BLOCKED')
        return retval | BAD_REPOMD
    for md_type in md_types:
        try:
            print " verifying %s checksum" % md_type
            newrepo.retrieveMD(md_type)
        except Errors.RepoError, e:
            # Record the failure but keep checking the other types.
            print " %s metadata missing or does not match checksum" % md_type
            retval = retval | BAD_METADATA
if not os.access(opts.destdir, os.W_OK) and not opts.urls: print >> sys.stderr, "Error: Cannot write to destination dir %s" % opts.destdir sys.exit(1) my = RepoTrack(opts=opts) my.doConfigSetup(fn=opts.config,init_plugins=False) # init yum, without plugins if opts.arch: archlist = [] archlist.extend(rpmUtils.arch.getArchList(opts.arch)) else: archlist = rpmUtils.arch.getArchList() # do the happy tmpdir thing if we're not root if os.geteuid() != 0 or opts.tempcache: cachedir = getCacheDir() if cachedir is None: print >> sys.stderr, "Error: Could not make cachedir, exiting" sys.exit(50) my.repos.setCacheDir(cachedir) if len(opts.repoid) > 0: myrepos = [] # find the ones we want for glob in opts.repoid: myrepos.extend(my.repos.findRepos(glob)) # disable them all for repo in my.repos.repos.values():
sys.exit(1) if not os.access(opts.destdir, os.W_OK) and not opts.urls: print >> sys.stderr, "Error: Cannot write to destination dir %s" % opts.destdir sys.exit(1) my = RepoSync(opts=opts) my.doConfigSetup(fn=opts.config, init_plugins=opts.plugins) # Force unprivileged users to have a private temporary cachedir # if they've not given an explicit cachedir if os.getuid() != 0 and not opts.cachedir: opts.tempcache = True if opts.tempcache: cachedir = getCacheDir() if cachedir is None: print >> sys.stderr, "Error: Could not make cachedir, exiting" sys.exit(50) my.repos.setCacheDir(cachedir) elif opts.cachedir: my.repos.setCacheDir(opts.cachedir) if len(opts.repoid) > 0: myrepos = [] # find the ones we want for glob in opts.repoid: myrepos.extend(my.repos.findRepos(glob))
class RepoCheck(YumUtilBase):
    """Yum utility that dispatches registered repo checks by name.

    Checks are registered in __init__ and looked up via the first CLI
    command word; each check contributes its own option-parser setup,
    pre-setup and runCheck hooks.
    """
    NAME = 'repo-check'
    VERSION = '1.0'

    def __init__(self):
        # Map of check name -> check object.
        self._checks = {}
        # Register checks
        self.registerCheck(TestCheck())
        # setup the base
        YumUtilBase.__init__(self, RepoCheck.NAME, RepoCheck.VERSION, self._makeUsage())
        self.logger = logging.getLogger("yum.verbose.cli.repo-check")
        # get the parser
        self.optparser = self.getOptionParser()
        # Construction immediately runs the utility.
        self.main()

    def do_parser_setup(self):
        """Let every registered check add its options to the parser."""
        if hasattr(self, 'getOptionGroup'):  # check if the group option API is available
            parser = self.getOptionGroup()
        else:
            parser = self.optparser
        # Call the checks parser setup methods
        for check in self._checks.values():
            check.doSetupParser(parser)

    def main(self):
        """Parse options, validate the requested check, set up yum and
        run the check.  Exits with 50 on config/cachedir errors and 0
        when there is nothing to do."""
        self.do_parser_setup()
        try:
            opts = self.doUtilConfigSetup()
        except yum.Errors.RepoError, e:
            self.logger.error(str(e))
            sys.exit(50)
        # Check if there is anything to do.
        if len(self.cmds) < 1:
            print self.optparser.format_help()
            sys.exit(0)
        if not self.cmds[0] in self._checks:
            print "\nUnknown Command : %s \n" % self.cmds[0]
            print self.optparser.format_help()
            sys.exit(0)
        # Make it work as non root
        if self.conf.uid != 0:
            cachedir = getCacheDir()
            self.logger.debug('Running as non-root, using %s as cachedir' % cachedir)
            if cachedir is None:
                self.logger.error("Error: Could not make cachedir, exiting")
                sys.exit(50)
            self.repos.setCacheDir(cachedir)
            # Turn off cache
            self.conf.cache = 0
            # make sure the repos know about it, too
            self.repos.setCache(0)
        # Run the checks preSetup methods
        if self.cmds[0] in self._checks:
            check = self._checks[self.cmds[0]]
            args = self.cmds[1:]
            check.doPreSetup(self, args, opts)
        # Setup yum (Ts, RPM db, Repo & Sack)
        self.doUtilYumSetup()
        # Run the check
        if self.cmds[0] in self._checks:
            check = self._checks[self.cmds[0]]
            args = self.cmds[1:]
            self.logger.info("Running the %s check" % self.cmds[0])
            check.runCheck(self, args, opts)
def main():
    """Entry point for verifytree: verify that a local (file://) yum
    repository is internally consistent, optionally reporting results to
    a testopia run.  Returns a bitmask of BAD_* failure flags."""
    parser = OptionParser()
    # NOTE(review): the original multi-line usage string was collapsed in
    # the damaged source; line breaks reconstructed -- confirm wording.
    parser.usage = """
    verifytree - verify that a local yum repository is consistent

    verifytree /path/to/repo"""
    parser.add_option("-a","--checkall",action="store_true",default=False,
                      help="Check all packages in the repo")
    parser.add_option("-t","--testopia",action="store",type="int",
                      help="Report results to the given testopia run number")
    parser.add_option("-r","--treeinfo", action="store_true", default=False,
                      help="check the checksums of listed files in a .treeinfo file, if available")
    opts, args = parser.parse_args()
    if not args:
        print "Must provide a file url to the repo"
        sys.exit(1)
    # FIXME: check that "args" is a valid dir before proceeding
    # (exists, isdir, contains .treeinfo, etc)
    url = args[0]
    if url[0] == '/':
        url = 'file://' + url
    s = urlparse.urlsplit(url)[0]
    h,d = urlparse.urlsplit(url)[1:3]
    if s != 'file':
        print "Must be a file:// url or you will not like this"
        sys.exit(1)
    # Derive a filesystem-safe repo id from host + path.
    repoid = '%s/%s' % (h, d)
    repoid = repoid.replace('/', '_')
    # Bad things happen if we're missing a trailing slash here
    if url[-1] != '/':
        url += '/'
    basedir = url.replace('file://', '')  # for a normal path thing
    my = yum.YumBase()
    my.conf.cachedir = getCacheDir()
    # Only our synthetic repo should be active.
    my.repos.disableRepo('*')
    newrepo = yum.yumRepo.YumRepository(repoid)
    newrepo.name = repoid
    newrepo.baseurl = [url]
    newrepo.basecachedir = my.conf.cachedir
    newrepo.metadata_expire = 0
    newrepo.timestamp_check = False
    newrepo.enablegroups = 1
    # we want *all* metadata
    newrepo.mdpolicy = 'group:all'
    # add our new repo
    my.repos.add(newrepo)
    # enable that repo
    my.repos.enableRepo(repoid)
    # setup the repo dirs/etc
    my.doRepoSetup(thisrepo=repoid)
    # Initialize results and reporting
    retval = 0
    if opts.testopia:
        run_id = testopia_create_run(opts.testopia)
        report = lambda case,result: testopia_report(run_id,case,result)
    else:
        # No-op reporter keeps the call sites uniform.
        report = lambda case,result: None
    # Check the metadata
    print "Checking repodata:"
    try:
        md_types = newrepo.repoXML.fileTypes()
        print " verifying repomd.xml with yum"
    except yum.Errors.RepoError:
        # Without repomd.xml nothing else can be checked.
        print " failed to load repomd.xml."
        report('REPODATA','FAILED')
        report('CORE_PACKAGES','BLOCKED')
        report('COMPS','BLOCKED')
        return retval | BAD_REPOMD
    for md_type in md_types:
        try:
            print " verifying %s checksum" % md_type
            newrepo.retrieveMD(md_type)
        except Errors.RepoError, e:
            # Record the failure but keep checking the other types.
            print " %s metadata missing or does not match checksum" % md_type
            retval = retval | BAD_METADATA