def findpackageset(hdrlist, dbPath='/'):
    ts = rpm.TransactionSet(dbPath)
    ts.setVSFlags(~(rpm.RPMVSF_NORSA | rpm.RPMVSF_NODSA | rpm.RPMVSF_NOMD5))

    # go through and figure out which packages in the header list are
    # actually applicable for our architecture
    pkgDict = {}
    for h in hdrlist:
        score1 = rpm.archscore(h[rpm.RPMTAG_ARCH])
        if score1:
            name = h[rpm.RPMTAG_NAME]
            if pkgDict.has_key(name):
                score2 = rpm.archscore(pkgDict[name][rpm.RPMTAG_ARCH])
                if score1 < score2:
                    pkgDict[name] = h
            else:
                pkgDict[name] = h

    hdlist = pkgDict.values()
    pkgDict = {}

    # loop through packages and find ones which are a newer
    # version than what we have
    for pkg in hdlist:
        mi = ts.dbMatch('name', pkg[rpm.RPMTAG_NAME])
        for h in mi:
            val = rpm.versionCompare(h, pkg)
            if val > 0:
                # dEBUG("found older version of %(name)s" % h)
                pass
            elif val < 0:
                # dEBUG("found newer version of %(name)s" % h)
                # check if we already have this package in our dictionary
                addNewPackageToUpgSet(pkgDict, pkg)
            else:
                # dEBUG("found same version of %(name)s" % h)
                pass

    # handle obsoletes
    for pkg in hdlist:
        if pkg[rpm.RPMTAG_NAME] in pkgDict.keys():
            # dEBUG("%(name)s is already selected" % pkg)
            continue

        if pkg[rpm.RPMTAG_OBSOLETENAME] is not None:
            for obs in pkg[rpm.RPMTAG_OBSOLETENAME]:
                mi = ts.dbMatch('name', obs)
                # FIXME: I should really iterate over all matches and verify
                # versioned obsoletes, but nothing in Red Hat Linux uses
                # them, so I'll optimize
                for h in mi:
                    # dEBUG("adding %(name)s to the upgrade set for obsoletes" % pkg)
                    addNewPackageToUpgSet(pkgDict, pkg)
                    break

    return pkgDict.values()
def __lt__(self, other):
    rc = cmp(self.name, other.name)
    if type(other) is RPMPackage:
        if rc == 0 and self.version != other.version:
            selfver, selfarch = splitarch(self.version)
            otherver, otherarch = splitarch(other.version)
            if selfver != otherver:
                rc = vercmp(self.version, other.version)
            if rc == 0:
                rc = -cmp(archscore(selfarch), archscore(otherarch))
    return rc == -1
def comparePackagesArch(pkg1, pkg2):
    arch1 = pkg1[4]
    arch2 = pkg2[4]
    score1 = rpm.archscore(arch1)
    score2 = rpm.archscore(arch2)
    if score1 > score2:
        return 1
    if score1 < score2:
        return -1
    if score1 == score2:
        return 0
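# A minimal usage sketch for comparePackagesArch() above (assumes the
# [name, version, release, epoch, arch] package lists used elsewhere in
# these snippets and the rpm Python bindings; the package entries here are
# made up for illustration): sorting with it orders candidates by the
# rpm.archscore() of their arch field, ascending.
import rpm

candidates = [
    ["gnutls", "1.0.20", "1", "", "x86_64"],
    ["gnutls", "1.0.20", "1", "", "i386"],
]
candidates.sort(comparePackagesArch)   # Python 2 cmp-style sort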
def getArchScore(arch, _sm=_SCOREMAP):
    try:
        rpm.platformscore(arch)
        if arch not in _sm:
            score = rpm.archscore(arch)
            _sm[arch] = score
        return _sm.get(arch, 0)
    except AttributeError:
        return 1
def _is_compatible_arch(self, arch):
    if rpm.archscore(arch) == 0:
        # Itanium special casing.
        if self._arch == 'ia64' and re.match('i.86', arch):
            return True
        else:
            return False
    else:
        return True
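# A minimal standalone illustration of the special case above (hypothetical
# helper name; the real method reads the host arch from self._arch): on an
# ia64 host, i?86 packages are treated as compatible even though
# rpm.archscore() rejects them.
import re
import rpm

def is_compatible_arch(host_arch, arch):
    if rpm.archscore(arch) == 0:
        return host_arch == 'ia64' and re.match('i.86', arch) is not None
    return True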
def __init__(self, hdlist, compatPackages = None, noscore = 0, prunePPCKernels = 1):
    self.hdlist = hdlist
    self.packages = {}
    newCompat = []
    self.hasFullHeaders = 0
    for h in hdlist:
        name = h[rpm.RPMTAG_NAME]

        # we should only keep kernel-pseries and kernel-iseries on
        # the appropriate machine
        if prunePPCKernels and name == "kernel-pseries" and iutil.getPPCMachine() != "pSeries":
            continue
        if prunePPCKernels and name == "kernel-iseries" and iutil.getPPCMachine() != "iSeries":
            continue

        # FIXME: this is a really bad hack so that we can try to avoid
        # weirdness with kernel packages
        if prunePPCKernels and (name == "kernel-iseries" or name == "kernel-pseries"):
            self.packages["kernel"] = Package(h)

        if noscore:
            self.packages[name] = Package(h)
            continue

        score1 = rpm.archscore(h['arch'])
        if score1:
            if self.packages.has_key(name):
                score2 = rpm.archscore(self.packages[name].h['arch'])
                if score1 < score2:
                    newCompat.append(self.packages[name])
                    self.packages[name] = Package(h)
                else:
                    newCompat.append(Package(h))
            else:
                self.packages[name] = Package(h)

    if hdlist and not self.packages:
        raise RuntimeError, ("the header list was read, but no packages "
                             "matching architecture '%s' were found." % os.uname()[4])

    if compatPackages != None:
        compatPackages.extend(newCompat)
def addNewPackageToUpgSet(pkgDict, pkg):
    """Check to see if there's already a package by this name in our
    dictionary.  If not, add this one.  If there is, see if this one is
    'newer' or has a 'better' arch."""
    name = pkg[rpm.RPMTAG_NAME]
    if not pkgDict.has_key(name):
        # nope
        pkgDict[name] = pkg
    else:
        # first check version
        val = rpm.versionCompare(pkgDict[name], pkg)
        if val < 0:
            # we're newer, add this one
            pkgDict[name] = pkg
        elif val == 0:
            # same version, so check the architecture
            newscore = rpm.archscore(pkg[rpm.RPMTAG_ARCH])
            oldscore = rpm.archscore(pkgDict[name][rpm.RPMTAG_ARCH])
            if newscore and newscore < oldscore:
                # if the score is less, we're "better"
                pkgDict[name] = pkg
def getArchScore(arch, _sm=_SCOREMAP):
    if arch not in _sm:
        score = rpm.archscore(arch)
        _sm[arch] = score
    return _sm.get(arch, 0)
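# A short usage sketch for the memoized getArchScore() above (assumes
# _SCOREMAP is a module-level dict, which the default-argument idiom relies
# on to persist between calls; the arch string is just an example).
print getArchScore("x86_64")   # first call queries rpm.archscore()
print getArchScore("x86_64")   # later calls are served from _SCOREMAP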
def load(self):
    CM = self.COMPMAP
    CF = self.COMPFLAGS
    Pkg = RPMPackage
    Prv = RPMProvides
    NPrv = RPMNameProvides
    PreReq = RPMPreRequires
    Req = RPMRequires
    Obs = RPMObsoletes
    Cnf = RPMConflicts
    prog = iface.getProgress(self._cache)
    for h, offset in self.getHeaders(prog):
        if h[1106]:  # RPMTAG_SOURCEPACKAGE
            continue
        arch = h[1022]  # RPMTAG_ARCH
        if rpm.archscore(arch) == 0:
            continue

        name = h[1000]   # RPMTAG_NAME
        epoch = h[1003]  # RPMTAG_EPOCH
        if epoch and epoch != "0":
            # RPMTAG_VERSION, RPMTAG_RELEASE
            version = "%s:%s-%s" % (epoch, h[1001], h[1002])
        else:
            # RPMTAG_VERSION, RPMTAG_RELEASE
            version = "%s-%s" % (h[1001], h[1002])
        versionarch = "%s@%s" % (version, arch)

        n = h[1047]  # RPMTAG_PROVIDENAME
        v = h[1113]  # RPMTAG_PROVIDEVERSION
        prvdict = {}
        for i in range(len(n)):
            ni = n[i]
            if not ni.startswith("config("):
                vi = v[i]
                if vi and vi[:2] == "0:":
                    vi = vi[2:]
                if ni == name and vi == version:
                    prvdict[(NPrv, intern(ni), versionarch)] = True
                else:
                    prvdict[(Prv, intern(ni), vi or None)] = True
        prvargs = prvdict.keys()

        n = h[1049]  # RPMTAG_REQUIRENAME
        if n:
            f = h[1048]  # RPMTAG_REQUIREFLAGS
            v = h[1050]  # RPMTAG_REQUIREVERSION
            reqdict = {}
            for i in range(len(n)):
                ni = n[i]
                if ni[:7] not in ("rpmlib(", "config("):
                    vi = v[i] or None
                    if vi and vi[:2] == "0:":
                        vi = vi[2:]
                    r = CM.get(f[i] & CF)
                    if ((r is not None and r != "=") or
                        ((Prv, ni, vi) not in prvdict)):
                        # RPMSENSE_PREREQ |
                        # RPMSENSE_SCRIPT_PRE |
                        # RPMSENSE_SCRIPT_PREUN |
                        # RPMSENSE_SCRIPT_POST |
                        # RPMSENSE_SCRIPT_POSTUN == 7744
                        reqdict[(f[i] & 7744 and PreReq or Req,
                                 intern(ni), r, vi)] = True
            reqargs = reqdict.keys()
        else:
            reqargs = None

        n = h[1054]  # RPMTAG_CONFLICTNAME
        if n:
            f = h[1053]  # RPMTAG_CONFLICTFLAGS
            # FIXME (20050321): Solaris rpm 4.1 hack
            if type(f) == int:
                f = [f]
            v = h[1055]  # RPMTAG_CONFLICTVERSION
            cnfargs = []
            for i in range(len(n)):
                vi = v[i] or None
                if vi and vi[:2] == "0:":
                    vi = vi[2:]
                cnfargs.append((Cnf, n[i], CM.get(f[i] & CF), vi))
        else:
            cnfargs = []

        obstup = (Obs, name, '<', versionarch)

        n = h[1090]  # RPMTAG_OBSOLETENAME
        if n:
            f = h[1114]  # RPMTAG_OBSOLETEFLAGS
            # FIXME (20050321): Solaris rpm 4.1 hack
            if type(f) == int:
                f = [f]
            v = h[1115]  # RPMTAG_OBSOLETEVERSION
            upgargs = []
            for i in range(len(n)):
                vi = v[i] or None
                if vi and vi[:2] == "0:":
                    vi = vi[2:]
                upgargs.append((Obs, n[i], CM.get(f[i] & CF), vi))
            cnfargs.extend(upgargs)
            upgargs.append(obstup)
        else:
            upgargs = [obstup]

        pkg = self.buildPackage((Pkg, name, versionarch),
                                prvargs, reqargs, upgargs, cnfargs)
        pkg.loaders[self] = offset
        self._offsets[offset] = pkg
        self._groups[pkg] = intern(h[rpm.RPMTAG_GROUP])
def solveDep(self, unknowns, availList,
             msgCallback = None, progressCallback = None,
             refreshCallback = None):
    self.cfg = config.initUp2dateConfig()
    self.log = up2dateLog.initLog()
    self.log.log_me("solving dep for: %s" % unknowns)

    self.refreshCallback = refreshCallback
    self.progressCallback = progressCallback
    self.msgCallback = msgCallback
    self.availList = availList
    availList.sort()

    self.availListHash = {}
    for p in self.availList:
        if self.availListHash.has_key(tuple(p[:4])):
            self.availListHash[tuple(p[:4])].append(p)
        else:
            self.availListHash[tuple(p[:4])] = [p]

    self.retDict = {}
    self.getSolutions(unknowns,
                      progressCallback = self.progressCallback,
                      msgCallback = self.msgCallback)
    reslist = []
    self.depToPkg = DictOfLists()
    self.depsNotAvailable = DictOfLists()
    # self.depToPkg = {}

    # FIXME: this should be cached, I don't really need to query the db
    # for this every time
    self.installedPkgList = rpmUtils.getInstalledPackageList(getArch=1)
    self.installedPkgHash = {}
    for pkg in self.installedPkgList:
        if self.installedPkgHash.has_key(pkg[0]):
            self.installedPkgHash[pkg[0]].append(pkg)
        else:
            self.installedPkgHash[pkg[0]] = [pkg]

    # we didn't get any results, bow out...
    if not len(self.retDict):
        return (reslist, self.depToPkg)

    newList = []
    availListNVRE = map(lambda p: p[:4], self.availList)
    failedDeps = []
    solutionPkgs = []
    pkgs = []
    for dep in self.retDict.keys():
        # skip the rest if we didn't get a result
        if len(self.retDict[dep]) == 0:
            continue

        solutions = self.retDict[dep]

        # fixme, grab the first package that satisfies the dep,
        # but make sure we match nvre against the list of avail packages
        # so we grab the right version of the package

        # if we only get one solution, use it.  No point in jumping
        # through other hoops
        if len(solutions) == 1:
            for solution in solutions:
                pkgs.append(solution)

        # we've got more than one possible solution, do some work
        # to figure out if I want one, some, or all of them
        elif len(solutions) > 1:
            # try to install the new version of whatever arch is
            # installed
            solutionsInstalled = self.__getSolutionsInstalled(solutions)
            found = 0
            if len(solutionsInstalled):
                for p in solutionsInstalled:
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    found = 1
                    if found:
                        break
            # we don't have any of the possible solutions installed, pick one
            else:
                # this is where we could do all sorts of heuristics to pick the
                # best one.  For now, grab the first one in the list that's
                # available
                # FIXME: we need to arch score here for multilib/kernel
                #        packages that don't have a version installed.
                #        This tends to happen a lot when installing into
                #        empty chroots (aka, pick which of the kernels to
                #        install).
                # ie, this is the pure heuristic approach...
                shortest = solutions[0]
                for solution in solutions:
                    if len(shortest[0]) > len(solution[0]):
                        shortest = solution

                # if we get this far, it's still possible that we have a package
                # that is multilib and we need to install both versions of it;
                # this is a check for that...
                if self.installedPkgHash.has_key(shortest[0]):
                    iList = self.installedPkgHash[shortest[0]]
                    for iPkg in iList:
                        if self.availListHash.has_key(tuple(shortest[:4])):
                            for i in self.availListHash[tuple(shortest[:4])]:
                                if self.cfg['forcedArch']:
                                    arches = self.cfg['forcedArch']
                                    if i[4] in arches:
                                        pkgs.append(i)
                                        self.depToPkg[dep] = i
                                        break
                                else:
                                    # it's not the same package we have installed
                                    if iPkg[:5] != i[:5]:
                                        # this arch matches the arch of a package
                                        # installed
                                        if iPkg[4] == i[4]:
                                            pkgs.append(i)
                                            self.depToPkg[dep] = i
                                            break

                # you may be asking yourself, wtf is that madness that follows?
                # well, good question...
                # it's basically a series of kluges to work around packaging problems
                # in RHEL-3 (depends who you ask)...  But basically, it's packages doing
                # stuff that was determined to be "unsupported" at the time of the
                # initial multilib support, but packages did it later anyway.
                # Basically, what we are trying to do is pick the best arch of
                # a package to solve a dep.  Easy enough.  The tricky part is
                # what happens when we discover the best arch is already in the
                # transaction and is _not_ solving the dep, so we need to look
                # at the next best arch.  So we check to see if we added it to
                # the list of selected packages already, and if so, add the
                # next best arch to the set.  To make it uglier, the second best
                # arch might not be valid at all, so in that case, don't use it
                # (which will cause an unsolved dep, but they happen...)
                if self.availListHash.has_key(tuple(shortest[:4])):
                    avail = self.availListHash[tuple(shortest[:4])]
                    bestArchP = None
                    useNextBestArch = None
                    bestArchP2 = None

                    # a saner approach might be to find the applicable arches,
                    # sort them, and walk over them in order

                    # remove the items with archscore <= 0
                    app_avail = filter(lambda a: rpm.archscore(a[4]), avail)
                    # sort the items by archscore, most appropriate first
                    app_avail.sort(lambda a, b: cmp(rpm.archscore(a[4]),
                                                    rpm.archscore(b[4])))

                    # so, what's wrong with this bit? well, if say "libgnutls.so(64bit)"
                    # doesn't find a dep, we'll try to solve it with gnutls.i386.
                    # it's because "gnutls" and "libgnutls.so(64bit)" are in the same
                    # set of deps.  Since gnutls.x86_64 is added for the "gnutls" dep,
                    # it's in the list of already selected
                    for i in app_avail:
                        if i in self.selectedPkgs:
                            continue
                        pkgs.append(i)
                        self.depToPkg[dep] = i
                        # we found something, stop iterating over available
                        break
                    # we found something for this dep, stop iterating
                    continue
                else:
                    # FIXME: in an ideal world, I could raise an exception here,
                    # but that will break the current gui
                    pkgs.append(p)
                    self.depToPkg[dep] = p
                    # raise UnsolvedDependencyError("Packages %s provide dep %s but are not available for install based on client config" % (pkgs, dep), dep, pkgs)

    for pkg in pkgs:
        self.selectedPkgs.append(pkg)
        if pkg[:4] in availListNVRE:
            newList.append(pkg)
        else:
            newList.append(pkg)

    reslist = newList
    # FIXME: we need to return the list of stuff that was skipped
    # because it wasn't on the available list and present it to the
    # user something like:
    #   blippy-1.0-1 requires barpy-2.0-1 but barpy-3.0-1 is already installed
    #print "\n\nself.depsNotAvailable"
    #pprint.pprint(self.depsNotAvailable)
    #pprint.pprint(self.depToPkg)
    return (reslist, self.depToPkg)
def handleArchEnd(self, name, attrs, data):
    if rpm.archscore(data) == 0:
        self._skip = self.PACKAGE
    else:
        self._arch = data
def listPackages(self, channel, msgCallback = None, progressCallback = None):
    # TODO: where do we implement cache validation? guess we
    # use http header time stamps to make a best guess since we
    # dont have any real info about the file format

    # a glob used to find the old versions to clean up
    # FIXME: this is probably overkill... Should only have
    # one version of any given
    globPattern = "%s/%s.*" % (self.cfg["storageDir"], channel['label'])
    oldLists = glob.glob(globPattern)
    channelTimeStamp = None
    if oldLists:
        filename = oldLists[0]
        filename = os.path.basename(filename)
        oldVersion = string.split(filename, '.')[-1]
        channelTimeStamp = time.strptime(oldVersion, "%Y%m%d%H%M%S")

    # for yum stuff, we assume that serverUrl is the base
    # path, channel is the relative path, and version isn't
    # used
    url = "%s/headers/header.info" % (channel['url'])
    if msgCallback:
        msgCallback("Fetching %s" % url)

    # oh, this is lame, but implement a fancy url fetcher later
    # heck, maybe even borrow the one from yum
    #print urlUtils
    ret = urlUtils.fetchUrl(url, lastModified=channelTimeStamp,
                            progressCallback = progressCallback,
                            agent = "Up2date %s/Yum" % up2dateUtils.version())
    if ret:
        (buffer, lmtime) = ret
    else:
        return None

    if not lmtime:
        lmtime = time.gmtime(time.time())
    version = time.strftime("%Y%m%d%H%M%S", lmtime)

    # use the time stamp on the header list as the channel "version"
    filePath = "%s/%s.%s" % (self.cfg["storageDir"], channel['label'], version)

    # it's possible to get bogus data here, so at least try not
    # to traceback
    if buffer:
        lines = string.split(buffer)
    else:
        lines = []

    # this gives us the raw yum header list, which is _not_
    # in the pretty format up2date likes, so convert it.
    # and sadly, I can no longer proudly state that up2date
    # at no point attempts to parse rpm filenames into something
    # useful.  At least yum includes the epoch
    pkgList = []

    # yum can have a different path for each rpm.  Not exactly
    # sure how this meets the "keep it simple" idea, but alas
    self.pkgNamePath = {}
    for line in lines:
        if line == "" or line[0] == "#":
            continue
        (envra, rpmPath) = string.split(line, '=')
        rpmPath = string.strip(rpmPath)
        (epoch, name, ver, rel, arch) = self._stripENVRA(envra)

        # quite possibly need to encode channel info here as well
        if epoch == "0" or epoch == 0:
            epoch = ""

        # hmm, if an arch doesn't apply, guess there's no point in
        # keeping it around; should make package lists smaller
        # and cut down on some churn
        if rpm.archscore(arch) == 0:
            continue

        self.pkgNamePath[(name, ver, rel, epoch, arch)] = rpmPath

        # doh, no size info.  FIXME
        size = "1000"  # er, yeah... that's not lame at all...
        pkgList.append([name, ver, rel, epoch, arch, size,
                        channel['label'], rpmPath])

    # now we have the package list, convert it to xmlrpc style
    # presentation and dump it
    pkgList.sort(lambda a, b: cmp(a[0], b[0]))

    count = 0
    total = len(pkgList)
    rd = repoDirector.initRepoDirector()
    for pkg in pkgList:
        # we're deep down in the yum specific bits, but we want to call
        # the generic getHeader to get it off disk or cache
        # (use this package's fields, not the leftovers from the parsing loop)
        hdr = rd.getHeader([pkg[0], pkg[1], pkg[2], pkg[3], pkg[4],
                            "0", channel['label']])
        if progressCallback:
            progressCallback(count, total)
        count = count + 1

    rpmSourceUtils.saveListToDisk(pkgList, filePath, globPattern)
    self.pkglists[channel['label']] = pkgList
    return pkgList