def __init__(self, config, source, buildroot='', reponame="default", nc=None): """Exclude packages matching whitespace-separated excludes. Use reponame for cache subdirectory name and pkg["yumreponame"]. Load PGP keys from URLs in key_urls.""" memorydb.RpmMemoryDB.__init__(self, config, source, buildroot) self.reponame = reponame self.excludes = self.config.excludes[:] self.mirrorlist = None self.baseurls = None self.yumconf = None self.key_urls = [] if nc: self.nc = nc else: self.nc = NetworkCache([], self.config.cachedir, self.reponame) if isinstance(source, types.DictType): found_urls = False self.yumconf = source if self.yumconf.has_key("main"): sec = self.yumconf["main"] if sec.has_key("exclude"): self.excludes.extend(sec["exclude"]) sec = self.yumconf[self.reponame] if sec.has_key("exclude"): self.excludes.extend(sec["exclude"]) if sec.has_key("gpgkey"): self.key_urls = sec["gpgkey"] if sec.has_key("baseurl"): self.nc.addCache(sec["baseurl"], self.reponame) found_urls = True if sec.has_key("mirrorlist"): self.mirrorlist = sec["mirrorlist"] found_urls = True if not found_urls: raise ValueError, "yum.conf is missing mirrorlist or baseurl parameter" else: self.baseurls = source self.nc.addCache(self.baseurls, self.reponame) self.repomd = None self.filelist_imported = 0 # Files included in primary.xml self._filerc = re.compile('^(.*bin/.*|/etc/.*|/usr/lib/sendmail)$') self._dirrc = re.compile('^(.*bin/.*|/etc/.*)$') self.comps = None
class RpmRepoDB(memorydb.RpmMemoryDB): """A (mostly) read-only RPM database storage in repodata XML. This is not a full implementation of Database: notably the file database is not populated at all.""" # A mapping between strings and RPMSENSE_* comparison flags flagmap = { 0 : None, None: 0, "EQ": RPMSENSE_EQUAL, "LT": RPMSENSE_LESS, "GT": RPMSENSE_GREATER, "LE": RPMSENSE_EQUAL | RPMSENSE_LESS, "GE": RPMSENSE_EQUAL | RPMSENSE_GREATER, RPMSENSE_EQUAL: "EQ", RPMSENSE_LESS: "LT", RPMSENSE_GREATER: "GT", RPMSENSE_EQUAL | RPMSENSE_LESS: "LE", RPMSENSE_EQUAL | RPMSENSE_GREATER: "GE"} def __init__(self, config, source, buildroot='', reponame="default", nc=None): """Exclude packages matching whitespace-separated excludes. Use reponame for cache subdirectory name and pkg["yumreponame"]. Load PGP keys from URLs in key_urls.""" memorydb.RpmMemoryDB.__init__(self, config, source, buildroot) self.reponame = reponame self.excludes = self.config.excludes[:] self.mirrorlist = None self.baseurls = None self.yumconf = None self.key_urls = [] if nc: self.nc = nc else: self.nc = NetworkCache([], self.config.cachedir, self.reponame) if isinstance(source, types.DictType): found_urls = False self.yumconf = source if self.yumconf.has_key("main"): sec = self.yumconf["main"] if sec.has_key("exclude"): self.excludes.extend(sec["exclude"]) sec = self.yumconf[self.reponame] if sec.has_key("exclude"): self.excludes.extend(sec["exclude"]) if sec.has_key("gpgkey"): self.key_urls = sec["gpgkey"] if sec.has_key("baseurl"): self.nc.addCache(sec["baseurl"], self.reponame) found_urls = True if sec.has_key("mirrorlist"): self.mirrorlist = sec["mirrorlist"] found_urls = True if not found_urls: raise ValueError, "yum.conf is missing mirrorlist or baseurl parameter" else: self.baseurls = source self.nc.addCache(self.baseurls, self.reponame) self.repomd = None self.filelist_imported = 0 # Files included in primary.xml self._filerc = re.compile('^(.*bin/.*|/etc/.*|/usr/lib/sendmail)$') self._dirrc = 
re.compile('^(.*bin/.*|/etc/.*)$') self.comps = None def readMirrorList(self): if not self.is_read and self.mirrorlist and self.yumconf: fname = self.nc.cache(self.mirrorlist, 1) if fname: lines = open(fname).readlines() os.unlink(fname) else: lines = [] for l in lines: l = l.strip() l = l.replace("$ARCH", "$basearch") l = self.yumconf.replaceVars(l) if l and l[0] != "#": self.nc.addCache([l,]) def getExcludes(self): return self.excludes def getMirrorList(self): return self.mirrorlist def isIdentitySave(self): """return if package objects that are added are in the db afterwards (.__contains__() returns True and the object are return from searches) """ return False def readRepoMD(self): # First we try and read the repomd file as a starting point. filename = self.nc.cache("repodata/repomd.xml", 1) if not filename: log.error("Couldn't open repomd.xml") return 0 try: fd = open(filename) ip = iterparse(fd, events=("start","end")) ip = iter(ip) except IOError: log.error("Couldn't parse repomd.xml") return 0 # Create our network cache object self.repomd = self._parse(ip) return 1 def readComps(self): # Try to read a comps.xml file if there is any before we parse the # primary.xml if self.repomd.has_key("group"): if not self.repomd["group"].has_key("location"): log.error("Couldn't find proper location for comps.xml in repomd") return 0 comps = self.repomd["group"]["location"] (csum, destfile) = self.nc.checksum(comps, "sha") if self.repomd["group"].has_key("checksum") and \ csum == self.repomd["group"]["checksum"]: filename = destfile else: filename = self.nc.cache(comps, 1) if not filename: return 0 try: self.comps = RpmCompsXML(self.config, filename) self.comps.read() except IOError: return 0 return 1 def readPrimary(self): # If we have either a local cache of the primary.xml.gz file or if # it is already local (nfs or local file system) we calculate it's # checksum and compare it with the one from repomd. 
If they are # the same we don't need to cache it again and can directly use it. if self.repomd.has_key("primary"): if not self.repomd["primary"].has_key("location"): return 0 primary = self.repomd["primary"]["location"] (csum, destfile) = self.nc.checksum(primary, "sha") if self.repomd["primary"].has_key("checksum") and \ csum == self.repomd["primary"]["checksum"]: filename = destfile else: filename = self.nc.cache(primary, 1) if not filename: return 0 try: fd = PyGZIP(filename) ip = iterparse(fd, events=("start","end")) ip = iter(ip) except IOError: log.error("Couldn't parse primary.xml") return 0 self._parse(ip) return 1 def readPGPKeys(self): for url in self.key_urls: filename = self.nc.cache(url, 1) try: f = file(filename) key_data = f.read() f.close() except Exception, e: log.error("Error reading GPG key %s: %s", filename, e) continue try: key_data = openpgp.isolateASCIIArmor(key_data) keys = openpgp.parsePGPKeys(key_data) except Exception, e: log.error("Invalid GPG key %s: %s", url, e) continue for k in keys: self.keyring.addKey(k)
def getOperations(self, resolver=None, installdb=None, erasedb=None):
    """Return an ordered list of (operation, RpmPackage).

    If resolver is None, use packages previously added to be acted upon;
    otherwise use the operations from resolver.  Return None on error
    (after warning the user).  New packages to be acted upon can't be
    added after calling this function, neither before nor after
    performing the current operations."""
    if self.config.timer:
        t0 = clock()
    self.__preprocess()
    # hack: getOperations() is called from yum.py where deps are already
    # resolved, and from scripts/pyrpminstall where we still need to do
    # dep checking.  This should get re-worked to clean this up.
    nodeps = 1
    if resolver == None:
        # build our own resolver from the accumulated self.rpms
        nodeps = 0
        dbcopy = self.db.getMemoryCopy()
        resolver = RpmResolver(self.config, dbcopy)
        for rpm in self.rpms:
            # Ignore errors from RpmResolver, the functions already warn
            # the user if necessary.
            if self.operation == OP_INSTALL:
                resolver.install(rpm)
            elif self.operation == OP_UPDATE:
                resolver.update(rpm)
            elif self.operation == OP_FRESHEN:
                resolver.freshen(rpm)
            elif self.operation == OP_ERASE:
                resolver.erase(rpm)
            else:
                log.error("Unknown operation")
        del self.rpms
    # only run dependency resolution when we built the resolver ourselves
    if not self.config.nodeps and not nodeps and resolver.resolve() != 1:
        return None
    if self.config.timer:
        log.info2("resolver took %s seconds", (clock() - t0))
        t0 = clock()
    log.info2("Ordering transaction...")
    orderer = RpmOrderer(self.config, resolver.installs, resolver.updates,
                         resolver.obsoletes, resolver.erases,
                         installdb=installdb, erasedb=erasedb)
    del resolver
    operations = orderer.order()
    if operations is None:
        # Currently can't happen
        log.error("Errors found during package dependency "
                  "checks and ordering.")
        return None
    # replace repodb pkgs by binaryrpm instances
    for i, (op, pkg) in enumerate(operations):
        if op not in (OP_UPDATE, OP_INSTALL, OP_FRESHEN):
            continue
        netcache = None
        if not self.config.nocache and \
               (pkg.source.startswith("http://") or
                pkg.source.startswith("https://") or
                pkg.yumrepo != None):
            if pkg.yumrepo != None:
                netcache = pkg.yumrepo.getNetworkCache()
            else:
                netcache = NetworkCache(
                    "/", os.path.join(self.config.cachedir, "default"))
        newpkg = package.RpmPackage(pkg.config, pkg.source,
                                    pkg.verifySignature, db=self.db)
        if pkg.yumrepo != None:
            # make the source absolute relative to the repo's base URL
            newpkg.source = os.path.join(
                pkg.yumrepo.getNetworkCache().getBaseURL(), newpkg.source)
        newpkg.nc = netcache
        newpkg.yumhref = pkg.yumhref
        newpkg.issrc = pkg.issrc
        # copy NEVRA
        for tag in self.config.nevratags:
            newpkg[tag] = pkg[tag]
        operations[i] = (op, newpkg)
    if self.config.timer:
        log.info2("orderer took %s seconds", (clock() - t0))
    del orderer
    if not self.config.ignoresize:
        # verify there is enough room in the cache dir and on the
        # target filesystems before handing the operations back
        if self.config.timer:
            t0 = clock()
        ok = getFreeCachespace(self.config, operations)
        if self.config.timer:
            log.info2("getFreeCachespace took %s seconds", (clock() - t0))
        if not ok:
            return None
        if self.config.timer:
            t0 = clock()
        ok = getFreeDiskspace(self.config, operations)
        if self.config.timer:
            log.info2("getFreeDiskspace took %s seconds", (clock() - t0))
        if not ok:
            return None
    return operations
def load(self, ks, dir, beta_key_verify=False):
    """Mount/locate the installation source described by kickstart ks,
    identify the distribution via .discinfo and load all repositories
    (including extra kickstart 'repo' entries).  Return 1 on success,
    0 on failure (after logging)."""
    self.dir = dir
    self.exclude = None
    # mount source to dir
    if ks.has_key("cdrom"):
        self.url = mount_cdrom(dir)
        if ks["cdrom"].has_key("exclude"):
            self.exclude = ks["cdrom"]["exclude"]
    elif ks.has_key("nfs"):
        opts = None
        if ks["nfs"].has_key("opts"):
            opts = ks["nfs"]["opts"]
        self.url = mount_nfs("nfs://%s:%s" % (ks["nfs"]["server"],
                                              ks["nfs"]["dir"]),
                             dir, options=opts)
        if ks["nfs"].has_key("exclude"):
            self.exclude = ks["nfs"]["exclude"]
    else:
        self.url = ks["url"]["url"]
        if ks["url"].has_key("exclude"):
            self.exclude = ks["url"]["exclude"]
    # create network cache
    self.cache = NetworkCache([ self.url ], cachedir=rpmconfig.cachedir)
    # get source information via .discinfo file
    if not self.cache.cache(".discinfo"):
        log.error("No .discinfo for '%s'", self.url)
        return 0
    di = get_discinfo(self.cache.cache(".discinfo"))
    if not di:
        log.error("Getting .discinfo for '%s' failed.", self.url)
        return 0
    (self.name, self.version, self.arch) = di
    if self.name.startswith("Red Hat Enterprise Linux"):
        self.variant = self.name[24:].strip()
        self.id = "RHEL"
        self.prefix = "RedHat"
    elif self.name.startswith("Fedora"):
        self.variant = ""
        self.id = "FC"
        self.prefix = "Fedora"
    else:
        log.error("Unknown source '%s'.", self.name)
        return 0
    self.release = "%s-%s" % (self.id, self.version)
    log.info1("Installation source: %s %s [%s]", self.name, self.version,
              self.arch)
    # load repos
    repos = [ ]
    self.yumconf = YumConf(self.version, self.arch, rpmdb=None,
                           filenames=[ ], reposdirs=[ ])
    if self.isRHEL() and self.cmpVersion("4.9") >= 0:
        # RHEL-5: repositories depend on the installation key
        key = None
        skip = False
        if ks.has_key("key"):
            key = ks["key"].keys()[0]
            if ks["key"][key].has_key("skip"):
                skip = True
                key = None
        inum = None
        if key and not skip and not beta_key_verify:
            if not instnum:
                # fix: the two literals previously concatenated to
                # "defaultinstallation." -- space was missing
                log.warning("Unable to verify installation key, "
                            "module instkey is missing.. using default "
                            "installation.")
            else:
                try:
                    inum = instnum.InstNum(key)
                except:
                    log.error("Installation key '%s' is not valid.", key)
                    return 0
        if inum:
            if inum.get_product_string().lower() != self.variant.lower():
                log.error("Installation key for '%s' does not match "
                          "'%s' media.",
                          inum.get_product_string().lower(),
                          self.variant.lower())
                return 0
            for name, path in inum.get_repos_dict().items():
                # VT is only available on these architectures
                if path == "VT" and \
                       not self.arch in [ "i386", "x86_64", "ia64" ]:
                    continue
                repos.append(path)
        else:
            # BETA: derive repos from the variant and key letters
            if self.variant == "Server":
                repos.append("Server")
                if key and key.find("C") >= 0:
                    repos.append("Cluster")
                if key and key.find("S") >= 0:
                    repos.append("ClusterStorage")
            elif self.variant == "Client":
                repos.append("Client")
                if key and key.find("W") >= 0:
                    repos.append("Workstation")
            if self.arch in [ "i386", "x86_64", "ia64" ]:
                if key and key.find("V") >= 0:
                    repos.append("VT")
        for repo in repos:
            repo_name = "%s-%s" % (self.release, repo)
            if repo in self.repos:
                log.error("Repository '%s' already defined.", repo_name)
                return 0
            log.info1("Loading repo '%s'", repo_name)
            # create yumconf
            self.yumconf[repo_name] = { }
            self.yumconf[repo_name]["baseurl"] = [ "%s/%s" % (self.url,
                                                              repo) ]
            if self.exclude:
                self.yumconf[repo_name]["exclude"] = self.exclude
            _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name)
            self.repos[repo_name] = _repo
            if not _repo.read():
                log.error("Could not load repository '%s'.", repo_name)
                return 0
    else:
        # RHEL <= 4
        # FC
        repo = self.release
        self.yumconf[repo] = { }
        self.yumconf[repo]["baseurl"] = [ self.url ]
        if self.exclude:
            self.yumconf[repo]["exclude"] = self.exclude
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
        if not _repo.comps:
            # every source repo has to have a comps
            log.error("Missing comps file for '%s'.", repo)
            return 0
    self.base_repo_names = self.repos.keys()
    if not ks.has_key("repo"):
        return 1
    # extra repositories from the kickstart file
    for repo in ks["repo"]:
        if repo in self.repos:
            log.error("Repository '%s' already defined.", repo)
            return 0
        log.info1("Loading repository '%s'", repo)
        self.yumconf[repo] = { }
        url = ks["repo"][repo]["baseurl"]
        if url[:6] == "nfs://":
            # NFS repos must be mounted locally first
            d = "%s/%s" % (dir, repo)
            create_dir("", d)
            url = mount_nfs(url, d)
        self.yumconf[repo]["baseurl"] = [ url ]
        if ks["repo"][repo].has_key("exclude"):
            self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"]
        if ks["repo"][repo].has_key("mirrorlist"):
            self.yumconf[repo]["mirrorlist"] = \
                ks["repo"][repo]["mirrorlist"]
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
    return 1
class Source: """ Load Source repo and extra repos according to kickstart configuration. """ def __init__(self): self.repos = { } self.base_repo_names = [ ] self.mounts = { } def load(self, ks, dir, beta_key_verify=False): self.dir = dir self.exclude = None # mount source to dir if ks.has_key("cdrom"): self.url = mount_cdrom(dir) if ks["cdrom"].has_key("exclude"): self.exclude = ks["cdrom"]["exclude"] elif ks.has_key("nfs"): opts = None if ks["nfs"].has_key("opts"): opts = ks["nfs"]["opts"] self.url = mount_nfs("nfs://%s:%s" % \ (ks["nfs"]["server"], ks["nfs"]["dir"]), dir, options=opts) if ks["nfs"].has_key("exclude"): self.exclude = ks["nfs"]["exclude"] else: self.url = ks["url"]["url"] if ks["url"].has_key("exclude"): self.exclude = ks["url"]["exclude"] # create network cache self.cache = NetworkCache([ self.url ], cachedir=rpmconfig.cachedir) # get source information via .discinfo file if not self.cache.cache(".discinfo"): log.error("No .discinfo for '%s'", self.url) return 0 di = get_discinfo(self.cache.cache(".discinfo")) if not di: log.error("Getting .discinfo for '%s' failed.", self.url) return 0 (self.name, self.version, self.arch) = di if self.name.startswith("Red Hat Enterprise Linux"): self.variant = self.name[24:].strip() self.id = "RHEL" self.prefix = "RedHat" elif self.name.startswith("Fedora"): self.variant = "" self.id = "FC" self.prefix = "Fedora" else: log.error("Unknown source '%s'.", self.name) return 0 self.release = "%s-%s" % (self.id, self.version) log.info1("Installation source: %s %s [%s]", self.name, self.version, self.arch) # load repos repos = [ ] self.yumconf = YumConf(self.version, self.arch, rpmdb=None, filenames=[ ], reposdirs=[ ]) if self.isRHEL() and self.cmpVersion("4.9") >= 0: # RHEL-5 key = None skip = False if ks.has_key("key"): key = ks["key"].keys()[0] if ks["key"][key].has_key("skip"): skip = True key = None inum = None if key and not skip and not beta_key_verify: if not instnum: log.warning("Unable to verify installation 
key, " "module instkey is missing.. using default" "installation.") else: try: inum = instnum.InstNum(key) except: log.error("Installation key '%s' is not valid.", key) return 0 if inum: if inum.get_product_string().lower() != \ self.variant.lower(): log.error("Installation key for '%s' does not match " "'%s' media.", inum.get_product_string().lower(), self.variant.lower()) return 0 for name, path in inum.get_repos_dict().items(): if path == "VT" and \ not self.arch in [ "i386", "x86_64", "ia64" ]: continue repos.append(path) else: # BETA if self.variant == "Server": repos.append("Server") if key and key.find("C") >= 0: repos.append("Cluster") if key and key.find("S") >= 0: repos.append("ClusterStorage") elif self.variant == "Client": repos.append("Client") if key and key.find("W") >= 0: repos.append("Workstation") if self.arch in [ "i386", "x86_64", "ia64" ]: if key and key.find("V") >= 0: repos.append("VT") for repo in repos: repo_name = "%s-%s" % (self.release, repo) if repo in self.repos: log.error("Repository '%s' already defined.", repo_name) return 0 log.info1("Loading repo '%s'", repo_name) # create yumconf self.yumconf[repo_name] = { } self.yumconf[repo_name]["baseurl"] = [ "%s/%s" % (self.url, repo) ] if self.exclude: self.yumconf[repo_name]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name) self.repos[repo_name] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo_name) return 0 else: # RHEL <= 4 # FC repo = self.release self.yumconf[repo] = { } self.yumconf[repo]["baseurl"] = [ self.url ] if self.exclude: self.yumconf[repo]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 if not _repo.comps: # every source repo has to have a comps log.error("Missing comps file for '%s'.", repo) return 0 self.base_repo_names = self.repos.keys() if not 
ks.has_key("repo"): return 1 for repo in ks["repo"]: if repo in self.repos: log.error("Repository '%s' already defined.", repo) return 0 log.info1("Loading repository '%s'", repo) self.yumconf[repo] = { } url = ks["repo"][repo]["baseurl"] if url[:6] == "nfs://": d = "%s/%s" % (dir, repo) create_dir("", d) url = mount_nfs(url, d) self.yumconf[repo]["baseurl"] = [ url ] if ks["repo"][repo].has_key("exclude"): self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"] if ks["repo"][repo].has_key("mirrorlist"): self.yumconf[repo]["mirrorlist"] = \ ks["repo"][repo]["mirrorlist"] _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 return 1 def getStage2(self): if self.isRHEL() and self.cmpVersion("4.9") >= 0: return self.cache.cache("images/stage2.img") else: if self.cmpVersion("6") < 0: return self.cache.cache("%s/base/stage2.img" % self.prefix) else: return self.cache.cache("images/stage2.img") def getPackages(self, ks, languages, all_comps, has_raid, fstypes): groups = [ ] pkgs = [ ] everything = False if ks.has_key("packages") and \ ks["packages"].has_key("groups") and \ len(ks["packages"]["groups"]) > 0: groups = ks["packages"]["groups"] # add default group "base" and "core if it is not in groups and # nobase is not set if not ks.has_key("packages") or not ks["packages"].has_key("nobase"): if not "base" in groups: groups.append("base") if not "core" in groups: groups.append("core") if all_comps: repos = self.repos.keys() else: repos = self.base_repo_names if "everything" in groups: for repo in repos: for group in self.repos[repo].comps.getGroups(): if not group in groups: groups.append(group) groups.remove("everything") everything = True if ks.has_key("packages") and ks["packages"].has_key("add") and \ "*" in ks["packages"]["add"]: # add all packages for repo in self.repos.keys(): pkgs.extend(self.repos[repo].getNames()) else: # add default desktop if 
ks.has_key("xconfig"): if ks["xconfig"].has_key("startxonboot"): if not "base-x" in groups: log.info1("Adding group 'base-x'.") groups.append("base-x") desktop = "GNOME" if ks["xconfig"].has_key("defaultdesktop"): desktop = ks["xconfig"]["defaultdesktop"] desktop = "%s-desktop" % desktop.lower() if not desktop in groups: log.info1("Adding group '%s'.", desktop) groups.append(desktop) normalizeList(groups) # test if groups are available repo_groups = { } for group in groups: found = False for repo in repos: if not self.repos[repo].comps: continue _group = self.repos[repo].comps.getGroup(group) if not _group: continue found = True if not _group in repo_groups.keys() or \ not repo in repo_groups[_group]: repo_groups.setdefault(_group, [ ]).append(repo) if not found: log.warning("Group '%s' does not exist.", group) del groups # add packages for groups for group in repo_groups: for repo in repo_groups[group]: comps = self.repos[repo].comps for pkg in comps.getPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) if everything: # add all packages in this group for (pkg, req) in \ comps.getConditionalPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) del repo_groups # add packages if ks.has_key("packages") and ks["packages"].has_key("add"): for name in ks["packages"]["add"]: if name == "*": continue found = False for repo in self.repos.keys(): _pkgs = self.repos[repo].searchPkgs([name]) if len(_pkgs) > 0: # silently add package if not name in pkgs: pkgs.append(name) found = True break if not found: log.warning("Package '%s' is not available.", pkg) # remove packages if ks.has_key("packages") and ks["packages"].has_key("drop"): for pkg in ks["packages"]["drop"]: if pkg in pkgs: log.info1("Removing package '%s'.", pkg) pkgs.remove(pkg) # add xorg driver package for past FC-5, RHEL-4 if ks.has_key("xconfig"): if (self.isRHEL() and self.cmpVersion("4.9") > 0) or \ 
(self.isFedora() and self.cmpVersion("4") > 0): if ks["xconfig"].has_key("driver"): self._addPkg("xorg-x11-drv-%s" % ks["xconfig"]["driver"], pkgs) else: if not "xorg-x11-drivers" in pkgs: self._addPkg("xorg-x11-drivers", pkgs) # add packages for needed filesystem types for fstype in fstypes: if fstype == "swap": continue self._addPkgByFilename("/sbin/mkfs.%s" % fstype, pkgs, "%s filesystem creation" % fstype) # add comps package if not "comps" in pkgs: try: self._addPkg("comps", pkgs) except: # ignore missing comps package pass # append mdadm if has_raid: self._addPkgByFilename("/sbin/mdadm", pkgs, "raid configuration") # append authconfig if ks.has_key("authconfig"): self._addPkgByFilename("/usr/sbin/authconfig", pkgs, "authentication configuration") # append iptables and config tool if ks.has_key("firewall") and \ not ks["firewall"].has_key("disabled"): self._addPkg("iptables", pkgs) # no firewall config tool in RHEL-3 if (self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "firewall configuration") # append lokkit if ks.has_key("selinux") and \ ((self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0)): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "selinux configuration") # append kernel if not "kernel" in pkgs and not "kernel-smp" in pkgs: self._addPkg("kernel", pkgs) # append kernel-devel for FC-6 and RHEL-5 if "gcc" in pkgs and \ ((self.isRHEL() and self.cmpVersion("5") >= 0 and \ (self.getVariant() != "Client" or \ "%s-Workstation" % (self.release) in self.repos.keys())) or \ (self.isFedora() and self.cmpVersion("6") >= 0)): if "kernel" in pkgs: self._addPkg("kernel-devel", pkgs) elif "kernel-smp" in pkgs: self._addPkg("kernel-smp-devel", pkgs) # append firstboot if ks.has_key("firstboot") and \ not ks["firstboot"].has_key("disabled"): self._addPkg("firstboot", pkgs) # append dhclient if ks.has_key("bootloader"): 
self._addPkg("grub", pkgs) # if self.getArch() == "ia64": # self._addPkg("elilo", pkgs) # elif self.getArch in [ "s390", "s390x" ]: # self._addPkg("s390utils", pkgs) # elif self.getArch() in [ "ppc", "ppc64" ]: # self._addPkg("yaboot", pkgs) # else: # self._addPkg("grub", pkgs) # append grub if ks.has_key("network") and len(ks["network"]) > 0: for net in ks["network"]: if net["bootproto"] == "dhcp": self._addPkg("dhclient", pkgs) # languages (pre FC-6 and pre RHEL-5) if len(languages) > 0: for repo in repos: _repo = self.repos[repo] if not _repo.comps: continue for group in _repo.comps.grouphash.keys(): self._compsLangsupport(pkgs, _repo.comps, languages, group) normalizeList(pkgs) return pkgs def _addPkg(self, name, pkgs, description=""): for repo in self.repos: _pkgs = self.repos[repo].searchPkgs([name]) if len(_pkgs) > 0: if not name in pkgs: if description != "": log.info1("Adding package '%s' for %s.", name, description) else: log.info1("Adding package '%s'.", name) pkgs.append(name) return if description != "": raise ValueError, "Could not find package '%s'" % (name) + \ "needed for %s." % (description) else: raise ValueError, "Could not find package '%s'" % (name) def _addPkgByFilename(self, name, pkgs, description=""): for repo in self.repos: s = self.repos[repo].searchFilenames(name) if len(s) < 1: # import file list if not already imported and search again if not self.repos[repo]._matchesFile(name) and \ not self.repos[repo].isFilelistImported(): self.repos[repo].importFilelist() s = self.repos[repo].searchFilenames(name) if len(s) < 1: continue for pkg in s: if pkg["name"] in pkgs: # package is already in list return pkg = s[0] # take first package, which provides ... if description != "": log.info1("Adding package '%s' for %s.", pkg["name"], description) else: log.info1("Adding package '%s'.", pkg["name"]) pkgs.append(pkg["name"]) return if description != "": raise ValueError, "Could not find package providing '%s'" % \ (name) + "needed for %s." 
% (description) else: raise ValueError, "Could not find package providing '%s'" % (name) def _compsLangsupport(self, pkgs, comps, languages, group): if not comps.grouphash.has_key(group): return if not comps.grouphash[group].has_key("langonly") or \ not comps.grouphash[group]["langonly"] in languages: return # add grouplist if comps.grouphash[group].has_key("grouplist"): for _group in comps.grouphash[group]["grouplist"]["groupreqs"]: self._compsLangsupport(pkgs, comps, languages, _group) for _group in comps.grouphash[group]["grouplist"]["metapkgs"]: self._compsLangsupport(pkgs, comps, languages, _group) for name in comps.getPackageNames(group): self._addPkg(name, pkgs, "langsupport") # old style conditional optional_list = comps.getOptionalPackageNames(group) for (name, requires) in optional_list: if name in pkgs: continue for req in requires: if req in pkgs: log.info1("Adding package '%s' for langsupport.", name) pkgs.append(name) break # new style conditional conditional_list = comps.getConditionalPackageNames(group) for (name, requires) in conditional_list: if name in pkgs: continue for req in requires: if req in pkgs: log.info1("Adding package '%s' for langsupport.", name) pkgs.append(name) break def getYumConf(self): ret = "" for repo in self.repos: ret += "[%s]\n" % repo ret += "name=%s\n" % repo _repo = self.repos[repo] baseurls = _repo.nc.baseurls[_repo.reponame] if len(baseurls) > 0: ret += "baseurl=%s\n" % " ".join(baseurls) if _repo.excludes: ret += "exclude=%s\n" % " ".join(_repo.excludes) if _repo.mirrorlist: ret += "mirrorlist=%s\n" % " ".join(_repo.mirrorlist) ret += "\n" return ret def close(self): for repo in self.repos: self.repos[repo].close() self.repos.clear() def cleanup(self): self.close() for mount in self.mounts: self.mounts[mount].umount() def isRHEL(self): return self.id == "RHEL" def isFedora(self): return self.id == "FC" def getName(self): return self.name def getVersion(self): return self.version def getArch(self): return self.arch def 
cmpVersion(self, other): if self.getVersion() == "development": return 1 return stringCompare(self.getVersion(), other) def getRelease(self): return self.release def getVariant(self): return self.variant
def load(self, ks, dir, beta_key_verify=False):
    """Mount/locate the installation source described by kickstart ks,
    identify the distribution via .discinfo and load all repositories
    (including extra kickstart 'repo' entries).  Return 1 on success,
    0 on failure (after logging)."""
    self.dir = dir
    self.exclude = None
    # mount source to dir
    if ks.has_key("cdrom"):
        self.url = mount_cdrom(dir)
        if ks["cdrom"].has_key("exclude"):
            self.exclude = ks["cdrom"]["exclude"]
    elif ks.has_key("nfs"):
        opts = None
        if ks["nfs"].has_key("opts"):
            opts = ks["nfs"]["opts"]
        self.url = mount_nfs("nfs://%s:%s" % (ks["nfs"]["server"],
                                              ks["nfs"]["dir"]),
                             dir, options=opts)
        if ks["nfs"].has_key("exclude"):
            self.exclude = ks["nfs"]["exclude"]
    else:
        self.url = ks["url"]["url"]
        if ks["url"].has_key("exclude"):
            self.exclude = ks["url"]["exclude"]
    # create network cache
    self.cache = NetworkCache([self.url], cachedir=rpmconfig.cachedir)
    # get source information via .discinfo file
    if not self.cache.cache(".discinfo"):
        log.error("No .discinfo for '%s'", self.url)
        return 0
    di = get_discinfo(self.cache.cache(".discinfo"))
    if not di:
        log.error("Getting .discinfo for '%s' failed.", self.url)
        return 0
    (self.name, self.version, self.arch) = di
    if self.name.startswith("Red Hat Enterprise Linux"):
        self.variant = self.name[24:].strip()
        self.id = "RHEL"
        self.prefix = "RedHat"
    elif self.name.startswith("Fedora"):
        self.variant = ""
        self.id = "FC"
        self.prefix = "Fedora"
    else:
        log.error("Unknown source '%s'.", self.name)
        return 0
    self.release = "%s-%s" % (self.id, self.version)
    log.info1("Installation source: %s %s [%s]", self.name, self.version,
              self.arch)
    # load repos
    repos = []
    self.yumconf = YumConf(self.version, self.arch, rpmdb=None,
                           filenames=[], reposdirs=[])
    if self.isRHEL() and self.cmpVersion("4.9") >= 0:
        # RHEL-5: repositories depend on the installation key
        key = None
        skip = False
        if ks.has_key("key"):
            key = ks["key"].keys()[0]
            if ks["key"][key].has_key("skip"):
                skip = True
                key = None
        inum = None
        if key and not skip and not beta_key_verify:
            if not instnum:
                # fix: the two literals previously concatenated to
                # "defaultinstallation." -- space was missing
                log.warning("Unable to verify installation key, "
                            "module instkey is missing.. using default "
                            "installation.")
            else:
                try:
                    inum = instnum.InstNum(key)
                except:
                    log.error("Installation key '%s' is not valid.", key)
                    return 0
        if inum:
            if inum.get_product_string().lower() != self.variant.lower():
                log.error(
                    "Installation key for '%s' does not match "
                    "'%s' media.",
                    inum.get_product_string().lower(),
                    self.variant.lower())
                return 0
            for name, path in inum.get_repos_dict().items():
                # VT is only available on these architectures
                if path == "VT" and \
                       not self.arch in [ "i386", "x86_64", "ia64" ]:
                    continue
                repos.append(path)
        else:
            # BETA: derive repos from the variant and key letters
            if self.variant == "Server":
                repos.append("Server")
                if key and key.find("C") >= 0:
                    repos.append("Cluster")
                if key and key.find("S") >= 0:
                    repos.append("ClusterStorage")
            elif self.variant == "Client":
                repos.append("Client")
                if key and key.find("W") >= 0:
                    repos.append("Workstation")
            if self.arch in ["i386", "x86_64", "ia64"]:
                if key and key.find("V") >= 0:
                    repos.append("VT")
        for repo in repos:
            repo_name = "%s-%s" % (self.release, repo)
            if repo in self.repos:
                log.error("Repository '%s' already defined.", repo_name)
                return 0
            log.info1("Loading repo '%s'", repo_name)
            # create yumconf
            self.yumconf[repo_name] = {}
            self.yumconf[repo_name]["baseurl"] = [ "%s/%s" % (self.url,
                                                              repo) ]
            if self.exclude:
                self.yumconf[repo_name]["exclude"] = self.exclude
            _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name)
            self.repos[repo_name] = _repo
            if not _repo.read():
                log.error("Could not load repository '%s'.", repo_name)
                return 0
    else:
        # RHEL <= 4
        # FC
        repo = self.release
        self.yumconf[repo] = {}
        self.yumconf[repo]["baseurl"] = [self.url]
        if self.exclude:
            self.yumconf[repo]["exclude"] = self.exclude
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
        if not _repo.comps:
            # every source repo has to have a comps
            log.error("Missing comps file for '%s'.", repo)
            return 0
    self.base_repo_names = self.repos.keys()
    if not ks.has_key("repo"):
        return 1
    # extra repositories from the kickstart file
    for repo in ks["repo"]:
        if repo in self.repos:
            log.error("Repository '%s' already defined.", repo)
            return 0
        log.info1("Loading repository '%s'", repo)
        self.yumconf[repo] = {}
        url = ks["repo"][repo]["baseurl"]
        if url[:6] == "nfs://":
            # NFS repos must be mounted locally first
            d = "%s/%s" % (dir, repo)
            create_dir("", d)
            url = mount_nfs(url, d)
        self.yumconf[repo]["baseurl"] = [url]
        if ks["repo"][repo].has_key("exclude"):
            self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"]
        if ks["repo"][repo].has_key("mirrorlist"):
            self.yumconf[repo]["mirrorlist"] = \
                ks["repo"][repo]["mirrorlist"]
        _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo)
        self.repos[repo] = _repo
        if not _repo.read():
            log.error("Could not load repository '%s'.", repo)
            return 0
    return 1
class Source: """ Load Source repo and extra repos according to kickstart configuration. """ def __init__(self): self.repos = {} self.base_repo_names = [] self.mounts = {} def load(self, ks, dir, beta_key_verify=False): self.dir = dir self.exclude = None # mount source to dir if ks.has_key("cdrom"): self.url = mount_cdrom(dir) if ks["cdrom"].has_key("exclude"): self.exclude = ks["cdrom"]["exclude"] elif ks.has_key("nfs"): opts = None if ks["nfs"].has_key("opts"): opts = ks["nfs"]["opts"] self.url = mount_nfs("nfs://%s:%s" % \ (ks["nfs"]["server"], ks["nfs"]["dir"]), dir, options=opts) if ks["nfs"].has_key("exclude"): self.exclude = ks["nfs"]["exclude"] else: self.url = ks["url"]["url"] if ks["url"].has_key("exclude"): self.exclude = ks["url"]["exclude"] # create network cache self.cache = NetworkCache([self.url], cachedir=rpmconfig.cachedir) # get source information via .discinfo file if not self.cache.cache(".discinfo"): log.error("No .discinfo for '%s'", self.url) return 0 di = get_discinfo(self.cache.cache(".discinfo")) if not di: log.error("Getting .discinfo for '%s' failed.", self.url) return 0 (self.name, self.version, self.arch) = di if self.name.startswith("Red Hat Enterprise Linux"): self.variant = self.name[24:].strip() self.id = "RHEL" self.prefix = "RedHat" elif self.name.startswith("Fedora"): self.variant = "" self.id = "FC" self.prefix = "Fedora" else: log.error("Unknown source '%s'.", self.name) return 0 self.release = "%s-%s" % (self.id, self.version) log.info1("Installation source: %s %s [%s]", self.name, self.version, self.arch) # load repos repos = [] self.yumconf = YumConf(self.version, self.arch, rpmdb=None, filenames=[], reposdirs=[]) if self.isRHEL() and self.cmpVersion("4.9") >= 0: # RHEL-5 key = None skip = False if ks.has_key("key"): key = ks["key"].keys()[0] if ks["key"][key].has_key("skip"): skip = True key = None inum = None if key and not skip and not beta_key_verify: if not instnum: log.warning("Unable to verify installation key, " 
"module instkey is missing.. using default" "installation.") else: try: inum = instnum.InstNum(key) except: log.error("Installation key '%s' is not valid.", key) return 0 if inum: if inum.get_product_string().lower() != \ self.variant.lower(): log.error( "Installation key for '%s' does not match " "'%s' media.", inum.get_product_string().lower(), self.variant.lower()) return 0 for name, path in inum.get_repos_dict().items(): if path == "VT" and \ not self.arch in [ "i386", "x86_64", "ia64" ]: continue repos.append(path) else: # BETA if self.variant == "Server": repos.append("Server") if key and key.find("C") >= 0: repos.append("Cluster") if key and key.find("S") >= 0: repos.append("ClusterStorage") elif self.variant == "Client": repos.append("Client") if key and key.find("W") >= 0: repos.append("Workstation") if self.arch in ["i386", "x86_64", "ia64"]: if key and key.find("V") >= 0: repos.append("VT") for repo in repos: repo_name = "%s-%s" % (self.release, repo) if repo in self.repos: log.error("Repository '%s' already defined.", repo_name) return 0 log.info1("Loading repo '%s'", repo_name) # create yumconf self.yumconf[repo_name] = {} self.yumconf[repo_name]["baseurl"] = [ "%s/%s" % (self.url, repo) ] if self.exclude: self.yumconf[repo_name]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo_name) self.repos[repo_name] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo_name) return 0 else: # RHEL <= 4 # FC repo = self.release self.yumconf[repo] = {} self.yumconf[repo]["baseurl"] = [self.url] if self.exclude: self.yumconf[repo]["exclude"] = self.exclude _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 if not _repo.comps: # every source repo has to have a comps log.error("Missing comps file for '%s'.", repo) return 0 self.base_repo_names = self.repos.keys() if not ks.has_key("repo"): return 1 
for repo in ks["repo"]: if repo in self.repos: log.error("Repository '%s' already defined.", repo) return 0 log.info1("Loading repository '%s'", repo) self.yumconf[repo] = {} url = ks["repo"][repo]["baseurl"] if url[:6] == "nfs://": d = "%s/%s" % (dir, repo) create_dir("", d) url = mount_nfs(url, d) self.yumconf[repo]["baseurl"] = [url] if ks["repo"][repo].has_key("exclude"): self.yumconf[repo]["exclude"] = ks["repo"][repo]["exclude"] if ks["repo"][repo].has_key("mirrorlist"): self.yumconf[repo]["mirrorlist"] = \ ks["repo"][repo]["mirrorlist"] _repo = getRepoDB(rpmconfig, self.yumconf, reponame=repo) self.repos[repo] = _repo if not _repo.read(): log.error("Could not load repository '%s'.", repo) return 0 return 1 def getStage2(self): if self.isRHEL() and self.cmpVersion("4.9") >= 0: return self.cache.cache("images/stage2.img") else: if self.cmpVersion("6") < 0: return self.cache.cache("%s/base/stage2.img" % self.prefix) else: return self.cache.cache("images/stage2.img") def getPackages(self, ks, languages, all_comps, has_raid, fstypes): groups = [] pkgs = [] everything = False if ks.has_key("packages") and \ ks["packages"].has_key("groups") and \ len(ks["packages"]["groups"]) > 0: groups = ks["packages"]["groups"] # add default group "base" and "core if it is not in groups and # nobase is not set if not ks.has_key("packages") or not ks["packages"].has_key("nobase"): if not "base" in groups: groups.append("base") if not "core" in groups: groups.append("core") if all_comps: repos = self.repos.keys() else: repos = self.base_repo_names if "everything" in groups: for repo in repos: for group in self.repos[repo].comps.getGroups(): if not group in groups: groups.append(group) groups.remove("everything") everything = True if ks.has_key("packages") and ks["packages"].has_key("add") and \ "*" in ks["packages"]["add"]: # add all packages for repo in self.repos.keys(): pkgs.extend(self.repos[repo].getNames()) else: # add default desktop if ks.has_key("xconfig"): if 
ks["xconfig"].has_key("startxonboot"): if not "base-x" in groups: log.info1("Adding group 'base-x'.") groups.append("base-x") desktop = "GNOME" if ks["xconfig"].has_key("defaultdesktop"): desktop = ks["xconfig"]["defaultdesktop"] desktop = "%s-desktop" % desktop.lower() if not desktop in groups: log.info1("Adding group '%s'.", desktop) groups.append(desktop) normalizeList(groups) # test if groups are available repo_groups = {} for group in groups: found = False for repo in repos: if not self.repos[repo].comps: continue _group = self.repos[repo].comps.getGroup(group) if not _group: continue found = True if not _group in repo_groups.keys() or \ not repo in repo_groups[_group]: repo_groups.setdefault(_group, []).append(repo) if not found: log.warning("Group '%s' does not exist.", group) del groups # add packages for groups for group in repo_groups: for repo in repo_groups[group]: comps = self.repos[repo].comps for pkg in comps.getPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) if everything: # add all packages in this group for (pkg, req) in \ comps.getConditionalPackageNames(group): if len(self.repos[repo].searchPkgs([pkg])) > 0: if not pkg in pkgs: pkgs.append(pkg) del repo_groups # add packages if ks.has_key("packages") and ks["packages"].has_key("add"): for name in ks["packages"]["add"]: if name == "*": continue found = False for repo in self.repos.keys(): _pkgs = self.repos[repo].searchPkgs([name]) if len(_pkgs) > 0: # silently add package if not name in pkgs: pkgs.append(name) found = True break if not found: log.warning("Package '%s' is not available.", pkg) # remove packages if ks.has_key("packages") and ks["packages"].has_key("drop"): for pkg in ks["packages"]["drop"]: if pkg in pkgs: log.info1("Removing package '%s'.", pkg) pkgs.remove(pkg) # add xorg driver package for past FC-5, RHEL-4 if ks.has_key("xconfig"): if (self.isRHEL() and self.cmpVersion("4.9") > 0) or \ (self.isFedora() and 
self.cmpVersion("4") > 0): if ks["xconfig"].has_key("driver"): self._addPkg("xorg-x11-drv-%s" % ks["xconfig"]["driver"], pkgs) else: if not "xorg-x11-drivers" in pkgs: self._addPkg("xorg-x11-drivers", pkgs) # add packages for needed filesystem types for fstype in fstypes: if fstype == "swap": continue self._addPkgByFilename("/sbin/mkfs.%s" % fstype, pkgs, "%s filesystem creation" % fstype) # add comps package if not "comps" in pkgs: try: self._addPkg("comps", pkgs) except: # ignore missing comps package pass # append mdadm if has_raid: self._addPkgByFilename("/sbin/mdadm", pkgs, "raid configuration") # append authconfig if ks.has_key("authconfig"): self._addPkgByFilename("/usr/sbin/authconfig", pkgs, "authentication configuration") # append iptables and config tool if ks.has_key("firewall") and \ not ks["firewall"].has_key("disabled"): self._addPkg("iptables", pkgs) # no firewall config tool in RHEL-3 if (self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "firewall configuration") # append lokkit if ks.has_key("selinux") and \ ((self.isRHEL() and self.cmpVersion("4") >= 0) or \ (self.isFedora() and self.cmpVersion("3") >= 0)): self._addPkgByFilename("/usr/sbin/lokkit", pkgs, "selinux configuration") # append kernel if not "kernel" in pkgs and not "kernel-smp" in pkgs: self._addPkg("kernel", pkgs) # append kernel-devel for FC-6 and RHEL-5 if "gcc" in pkgs and \ ((self.isRHEL() and self.cmpVersion("5") >= 0 and \ (self.getVariant() != "Client" or \ "%s-Workstation" % (self.release) in self.repos.keys())) or \ (self.isFedora() and self.cmpVersion("6") >= 0)): if "kernel" in pkgs: self._addPkg("kernel-devel", pkgs) elif "kernel-smp" in pkgs: self._addPkg("kernel-smp-devel", pkgs) # append firstboot if ks.has_key("firstboot") and \ not ks["firstboot"].has_key("disabled"): self._addPkg("firstboot", pkgs) # append dhclient if ks.has_key("bootloader"): self._addPkg("grub", 
pkgs) # if self.getArch() == "ia64": # self._addPkg("elilo", pkgs) # elif self.getArch in [ "s390", "s390x" ]: # self._addPkg("s390utils", pkgs) # elif self.getArch() in [ "ppc", "ppc64" ]: # self._addPkg("yaboot", pkgs) # else: # self._addPkg("grub", pkgs) # append grub if ks.has_key("network") and len(ks["network"]) > 0: for net in ks["network"]: if net["bootproto"] == "dhcp": self._addPkg("dhclient", pkgs) # languages (pre FC-6 and pre RHEL-5) if len(languages) > 0: for repo in repos: _repo = self.repos[repo] if not _repo.comps: continue for group in _repo.comps.grouphash.keys(): self._compsLangsupport(pkgs, _repo.comps, languages, group) normalizeList(pkgs) return pkgs def _addPkg(self, name, pkgs, description=""): for repo in self.repos: _pkgs = self.repos[repo].searchPkgs([name]) if len(_pkgs) > 0: if not name in pkgs: if description != "": log.info1("Adding package '%s' for %s.", name, description) else: log.info1("Adding package '%s'.", name) pkgs.append(name) return if description != "": raise ValueError, "Could not find package '%s'" % (name) + \ "needed for %s." % (description) else: raise ValueError, "Could not find package '%s'" % (name) def _addPkgByFilename(self, name, pkgs, description=""): for repo in self.repos: s = self.repos[repo].searchFilenames(name) if len(s) < 1: # import file list if not already imported and search again if not self.repos[repo]._matchesFile(name) and \ not self.repos[repo].isFilelistImported(): self.repos[repo].importFilelist() s = self.repos[repo].searchFilenames(name) if len(s) < 1: continue for pkg in s: if pkg["name"] in pkgs: # package is already in list return pkg = s[0] # take first package, which provides ... if description != "": log.info1("Adding package '%s' for %s.", pkg["name"], description) else: log.info1("Adding package '%s'.", pkg["name"]) pkgs.append(pkg["name"]) return if description != "": raise ValueError, "Could not find package providing '%s'" % \ (name) + "needed for %s." 
% (description) else: raise ValueError, "Could not find package providing '%s'" % (name) def _compsLangsupport(self, pkgs, comps, languages, group): if not comps.grouphash.has_key(group): return if not comps.grouphash[group].has_key("langonly") or \ not comps.grouphash[group]["langonly"] in languages: return # add grouplist if comps.grouphash[group].has_key("grouplist"): for _group in comps.grouphash[group]["grouplist"]["groupreqs"]: self._compsLangsupport(pkgs, comps, languages, _group) for _group in comps.grouphash[group]["grouplist"]["metapkgs"]: self._compsLangsupport(pkgs, comps, languages, _group) for name in comps.getPackageNames(group): self._addPkg(name, pkgs, "langsupport") # old style conditional optional_list = comps.getOptionalPackageNames(group) for (name, requires) in optional_list: if name in pkgs: continue for req in requires: if req in pkgs: log.info1("Adding package '%s' for langsupport.", name) pkgs.append(name) break # new style conditional conditional_list = comps.getConditionalPackageNames(group) for (name, requires) in conditional_list: if name in pkgs: continue for req in requires: if req in pkgs: log.info1("Adding package '%s' for langsupport.", name) pkgs.append(name) break def getYumConf(self): ret = "" for repo in self.repos: ret += "[%s]\n" % repo ret += "name=%s\n" % repo _repo = self.repos[repo] baseurls = _repo.nc.baseurls[_repo.reponame] if len(baseurls) > 0: ret += "baseurl=%s\n" % " ".join(baseurls) if _repo.excludes: ret += "exclude=%s\n" % " ".join(_repo.excludes) if _repo.mirrorlist: ret += "mirrorlist=%s\n" % " ".join(_repo.mirrorlist) ret += "\n" return ret def close(self): for repo in self.repos: self.repos[repo].close() self.repos.clear() def cleanup(self): self.close() for mount in self.mounts: self.mounts[mount].umount() def isRHEL(self): return self.id == "RHEL" def isFedora(self): return self.id == "FC" def getName(self): return self.name def getVersion(self): return self.version def getArch(self): return self.arch def 
cmpVersion(self, other): if self.getVersion() == "development": return 1 return stringCompare(self.getVersion(), other) def getRelease(self): return self.release def getVariant(self): return self.variant
class RpmRepoDB(memorydb.RpmMemoryDB):
    """A (mostly) read-only RPM database storage in repodata XML.

    This is not a full implementation of Database: notably the file
    database is not populated at all."""

    # A mapping between strings and RPMSENSE_* comparison flags
    # (bidirectional: string -> flag bits and flag bits -> string).
    flagmap = { 0 : None, None: 0,
                "EQ": RPMSENSE_EQUAL,
                "LT": RPMSENSE_LESS,
                "GT": RPMSENSE_GREATER,
                "LE": RPMSENSE_EQUAL | RPMSENSE_LESS,
                "GE": RPMSENSE_EQUAL | RPMSENSE_GREATER,
                RPMSENSE_EQUAL: "EQ",
                RPMSENSE_LESS: "LT",
                RPMSENSE_GREATER: "GT",
                RPMSENSE_EQUAL | RPMSENSE_LESS: "LE",
                RPMSENSE_EQUAL | RPMSENSE_GREATER: "GE"}

    def __init__(self, config, source, buildroot='', reponame="default",
                 nc=None):
        """Exclude packages matching whitespace-separated excludes.  Use
        reponame for cache subdirectory name and pkg["yumreponame"].
        Load PGP keys from URLs in key_urls.

        source is either a yum.conf-style dict (sections "main" and
        reponame) or a list of base URLs; raises ValueError when a dict
        source defines neither baseurl nor mirrorlist."""
        memorydb.RpmMemoryDB.__init__(self, config, source, buildroot)
        self.reponame = reponame
        # start from the global excludes; repo config may extend this
        self.excludes = self.config.excludes[:]
        self.mirrorlist = None
        self.baseurls = None
        self.yumconf = None
        self.key_urls = []
        if nc:
            self.nc = nc
        else:
            self.nc = NetworkCache([], self.config.cachedir, self.reponame)
        if isinstance(source, types.DictType):
            # yum.conf-style configuration dict
            found_urls = False
            self.yumconf = source
            if self.yumconf.has_key("main"):
                sec = self.yumconf["main"]
                if sec.has_key("exclude"):
                    self.excludes.extend(sec["exclude"])
            # per-repo section for our reponame
            sec = self.yumconf[self.reponame]
            if sec.has_key("exclude"):
                self.excludes.extend(sec["exclude"])
            if sec.has_key("gpgkey"):
                self.key_urls = sec["gpgkey"]
            if sec.has_key("baseurl"):
                self.nc.addCache(sec["baseurl"], self.reponame)
                found_urls = True
            if sec.has_key("mirrorlist"):
                self.mirrorlist = sec["mirrorlist"]
                found_urls = True
            if not found_urls:
                raise ValueError, "yum.conf is missing mirrorlist or baseurl parameter"
        else:
            # plain list of base URLs
            self.baseurls = source
            self.nc.addCache(self.baseurls, self.reponame)
        self.repomd = None          # parsed repodata/repomd.xml (see readRepoMD)
        self.filelist_imported = 0
        # Files included in primary.xml: regexes deciding which filenames
        # are expected there (binaries, /etc, sendmail) vs. full filelists.
        self._filerc = re.compile('^(.*bin/.*|/etc/.*|/usr/lib/sendmail)$')
        self._dirrc = re.compile('^(.*bin/.*|/etc/.*)$')
        self.comps = None           # RpmCompsXML once readComps() succeeds

    def readMirrorList(self):
        """Fetch the mirrorlist (if configured) and add each listed URL to
        the network cache.  No-op once the repo has been read."""
        if not self.is_read and self.mirrorlist and self.yumconf:
            fname = self.nc.cache(self.mirrorlist, 1)
            if fname:
                lines = open(fname).readlines()
                # the cached copy is only needed while parsing
                os.unlink(fname)
            else:
                lines = []
            for l in lines:
                l = l.strip()
                # normalize yum variable spelling before substitution
                l = l.replace("$ARCH", "$basearch")
                l = self.yumconf.replaceVars(l)
                if l and l[0] != "#":
                    self.nc.addCache([ l, ])

    def getExcludes(self):
        # excludes = global config excludes + repo config excludes
        return self.excludes

    def getMirrorList(self):
        return self.mirrorlist

    def isIdentitySave(self):
        """return if package objects that are added are in the db afterwards
        (.__contains__() returns True and the object are return from searches)
        """
        return False

    def readRepoMD(self):
        """Download and parse repodata/repomd.xml into self.repomd.
        Returns 1 on success, 0 on error."""
        # First we try and read the repomd file as a starting point.
        filename = self.nc.cache("repodata/repomd.xml", 1)
        if not filename:
            log.error("Couldn't open repomd.xml")
            return 0
        try:
            fd = open(filename)
            ip = iterparse(fd, events=("start", "end"))
            ip = iter(ip)
        except IOError:
            log.error("Couldn't parse repomd.xml")
            return 0
        # Create our network cache object
        # NOTE(review): _parse is defined elsewhere in this class; it is
        # expected to return the parsed repomd structure.
        self.repomd = self._parse(ip)
        return 1

    def readComps(self):
        """Load the comps (group) file referenced by repomd, preferring a
        cached copy whose checksum matches.  Returns 1 on success (also
        when no group entry exists), 0 on error."""
        # Try to read a comps.xml file if there is any before we parse the
        # primary.xml
        if self.repomd.has_key("group"):
            if not self.repomd["group"].has_key("location"):
                log.error(
                    "Couldn't find proper location for comps.xml in repomd")
                return 0
            comps = self.repomd["group"]["location"]
            (csum, destfile) = self.nc.checksum(comps, "sha")
            if self.repomd["group"].has_key("checksum") and \
               csum == self.repomd["group"]["checksum"]:
                # cached copy is up to date, use it directly
                filename = destfile
            else:
                filename = self.nc.cache(comps, 1)
            if not filename:
                return 0
            try:
                self.comps = RpmCompsXML(self.config, filename)
                self.comps.read()
            except IOError:
                return 0
        return 1

    def readPrimary(self):
        """Download and parse primary.xml.gz referenced by repomd.
        Returns 1 on success, 0 on error.
        NOTE(review): falls off the end (returns None) when repomd has no
        "primary" entry -- callers should treat that as failure; confirm."""
        # If we have either a local cache of the primary.xml.gz file or if
        # it is already local (nfs or local file system) we calculate it's
        # checksum and compare it with the one from repomd. If they are
        # the same we don't need to cache it again and can directly use it.
        if self.repomd.has_key("primary"):
            if not self.repomd["primary"].has_key("location"):
                print "Error primary has no location"
                return 0
            primary = self.repomd["primary"]["location"]
            # checksum-based cache reuse is disabled (kept for reference):
#            if self.repomd["primary"].has_key("checksum"):
#                (csum, destfile) = self.nc.checksum(primary, "sha")
#                csum == self.repomd["primary"]["checksum"]:
#                filename = destfile
#            else:
            filename = self.nc.cache(primary, 1)
            if not filename:
                print "Error can't find file for primary: " + primary
                return 0
            try:
                # primary.xml is gzip-compressed; stream-decompress it
                fd = PyGZIP(filename)
                ip = iterparse(fd, events=("start", "end"))
                ip = iter(ip)
            except IOError:
                log.error("Couldn't parse primary.xml")
                print "Error parsing primary.xml"
                return 0
            self._parse(ip)
            return 1

    def readPGPKeys(self):
        """Fetch every URL in key_urls and add the contained PGP keys to
        the keyring; unreadable or invalid keys are logged and skipped."""
        for url in self.key_urls:
            filename = self.nc.cache(url, 1)
            try:
                f = file(filename)
                key_data = f.read()
                f.close()
            except Exception, e:
                log.error("Error reading GPG key %s: %s", filename, e)
                continue
            try:
                key_data = openpgp.isolateASCIIArmor(key_data)
                keys = openpgp.parsePGPKeys(key_data)
            except Exception, e:
                log.error("Invalid GPG key %s: %s", url, e)
                continue
            for k in keys:
                self.keyring.addKey(k)