def reorderChangesets(f, csPath, initialSizes, maxisosize, isoblocksize, baseTrove):
    """Order changeset entries for packing onto fixed-size discs.

    Reads whitespace-separated lines of "<csFile> <trvName> ..." from ``f``,
    measures each changeset file's on-disc footprint (in ``isoblocksize``
    blocks), and returns the lines reordered so that ``info-`` troves come
    first, then troves reachable from the reserved set (module-level
    ``basicminimal`` — defined elsewhere; verify against the module), then
    ``baseTrove``, then everything else largest-first.  The tail section is
    then first-fit packed onto discs of ``maxisosize`` capacity; the first
    disc starts with ``initialSizes[0]`` already used, later discs with
    ``initialSizes[1]``.
    """
    reserved = []          # raw lines whose trove is in basicminimal
    weighted = []          # (size, line) for ordinary troves
    infoEntries = []       # (size, line) for info- troves
    baseEntries = []       # (size, line) for the designated base trove
    for entry in f:
        fields = entry.split()
        csFile, trvName = fields[0], fields[1]
        if trvName in basicminimal:
            reserved.append(entry)
        size = spaceused(join(csPath, csFile), isoblocksize)
        if trvName.startswith('info-'):
            infoEntries.append((size, entry))
        elif trvName == baseTrove:
            baseEntries.append((size, entry))
        else:
            weighted.append((size, entry))

    # Largest changesets first so first-fit packing wastes less space.
    weighted = sorted(weighted, reverse=True)

    pinned = []
    if reserved:
        # Collect the reserved trove names plus every trove they reference
        # (strong and weak refs), so the whole closure is kept together.
        names = set()
        for resLine in reserved:
            fields = resLine.split()
            names.add(fields[1])
            cs = changeset.ChangeSetFromFile(join(csPath, fields[0]))
            trv = Trove(list(cs.iterNewTroveList())[0])
            for ref in trv.iterTroveList(strongRefs=True, weakRefs=True):
                names.add(ref[0].split(':')[0])
        # Pull the closure members out of the general pool (iterate a copy
        # because we remove while walking).
        for item in weighted[:]:
            if item[1].split()[1] in names:
                pinned.append(item)
                weighted.remove(item)

    ordered = infoEntries + pinned + baseEntries + weighted

    # Each disc is [lines, remaining capacity]; first-fit placement.
    discs = [[[], maxisosize - initialSizes[0]]]
    for size, entry in ordered:
        placed = False
        for disc in discs:
            if maxisosize and size <= disc[1]:
                disc[0].append(entry)
                disc[1] -= size
                placed = True
                break
        if not placed:
            discs.append([[entry], maxisosize - size - initialSizes[1]])

    csList = []
    for contents, _ in discs:
        csList.extend(contents)
    return csList
def commitJobs(conaryclient, jobList, reposName, message=None,
               commitOutdatedSources=False, sourceOnly = False,
               excludeSpecs=None, writeToFile=None):
    """Commit the built troves from rMake jobs by cloning them from the
    internal rMake repository onto their targets' parent branches.

    @param conaryclient: conary client used for repository access and cloning
    @param jobList: iterable of rMake job objects to commit
    @param reposName: hostname of the internal rMake repository
    @param message: optional commit (clone) message
    @param commitOutdatedSources: commit even if newer sources exist upstream
    @param sourceOnly: commit only source troves, skip binaries
    @param excludeSpecs: trove specs (optionally with a 4th context element)
        to leave out of the commit
    @param writeToFile: if set, write the final changeset to this path
        instead of committing it
    @return: (False, errorMessage) on failure, or
        (True, {jobId: {troveNVFC: [(n, v, f), ...]}}) mapping each committed
        job/trove to the binaries it produced.
    """
    jobsToCommit = {}
    mapping = {}
    for job in jobList:
        # Key by jobId so the same job passed twice is committed once.
        if not job.isCommitted():
            jobsToCommit[job.jobId] = job
    jobsToCommit = jobsToCommit.values() # dedup job list
    if not jobsToCommit:
        err = 'Job(s) already committed'
        return False, err

    allTroves = []
    alreadyCommitted = False
    for job in jobsToCommit:
        mapping[job.jobId] = {}
        for trove in job.iterTroves():
            allTroves.append(trove)
            troveVersion = trove.getVersion()
            if troveVersion.getHost() == reposName:
                # Troves cooked straight from the filesystem have no parent
                # branch to clone onto, so they cannot be committed.
                if not troveVersion.branch().hasParentBranch():
                    message = ('Cannot commit filesystem cook %s - '
                               ' nowhere to commit to!' % trove.getName())
                    return False, message
    assert(allTroves)

    source = trovesource.SimpleTroveSource()
    if excludeSpecs:
        # An exclude spec may carry a 4th element naming a build context;
        # remember which contexts each bare (n, v, f) spec applies to.
        excludeSpecsWithContext = {}
        troveMap = {}
        for excludeSpec in excludeSpecs:
            if len(excludeSpec) == 4:
                context = excludeSpec[3]
            else:
                context = None
            excludeSpecsWithContext.setdefault(
                                        excludeSpec[:3], []).append(context)
        excludeSpecs = [ x[:3] for x in excludeSpecs ]

        for trove in allTroves:
            # Index by package name (strip the component) for spec matching.
            troveTup = (trove.getName().split(':')[0],
                        trove.getVersion(),
                        trove.getFlavor())
            source.addTrove(*troveTup)
            troveMap.setdefault(troveTup, []).append(trove)
        source.searchAsDatabase()
        matches = source.findTroves(None, excludeSpecs, None,
                                    allowMissing=True)
        trvMatches = []
        for excludeSpec, matchList in matches.iteritems():
            contexts = excludeSpecsWithContext[excludeSpec]
            for match in matchList:
                for trv in troveMap[match]:
                    # A spec with no context (None) excludes every context.
                    if trv.context in contexts or None in contexts:
                        trvMatches.append(trv)

        allTroves = [ x for x in allTroves if x not in trvMatches ]
        if not allTroves:
            message = ('All troves excluded - not committing')
            return False, message

    repos = conaryclient.getRepos()

    trovesByNBF = {}       # (name, targetBranch, flavor) -> (buildTrove, version)
    sourcesToCheck = []
    branchMap = {}         # rMake branch -> target (parent) branch
    trovesToClone = []
    for trove in allTroves:
        builtTroves = list(trove.iterBuiltTroves())
        if not builtTroves:
            continue
        if builtTroves[0][1].getHost() != reposName:
            # Built troves already live outside the rMake repository, so
            # this job was committed previously.
            alreadyCommitted = True
            for n,v,f in builtTroves:
                trovesByNBF[n, v.branch(), f] = (trove, v)
            continue

        troveVersion = trove.getVersion()
        if troveVersion.getHost() == reposName:
            sourceTup = (trove.getName(), troveVersion, Flavor())
            targetBranch = troveVersion.branch().parentBranch()
            branchMap[troveVersion.branch()] = targetBranch
            nbf = trove.getName(), targetBranch, Flavor()
            if nbf in trovesByNBF:
                # Two jobs may build the same source only at the same version.
                if trovesByNBF[nbf][1] != troveVersion:
                    badVersion = trovesByNBF[nbf][1]
                    return False, ("Cannot commit two different versions of source component %s:"
                                   " %s and %s" % (trove.getName(),
                                                   troveVersion, badVersion))
            trovesByNBF[nbf] = trove, troveVersion
            sourcesToCheck.append(sourceTup)
        if sourceOnly:
            continue

        for troveTup in builtTroves:
            branch = troveTup[1].branch()
            targetBranch = branch.parentBranch()
            # add mapping so that when the cloning is done
            # we can tell what commit resulted in what binaries.
            nbf = (troveTup[0], targetBranch, troveTup[2])
            if nbf in trovesByNBF:
                otherBinary = trovesByNBF[nbf][0].getBinaryTroves()[0]
                if otherBinary[1].branch() == targetBranch:
                    # this one's already committed.
                    break
                # discard the later of the two commits.
                if trovesByNBF[nbf][0].getVersion() > trove.getVersion():
                    # we're the earlier one
                    badTrove, badVersion = trovesByNBF[nbf]
                    newTrove = trove
                    newVersion = troveTup[1]
                else:
                    badTrove = trove
                    badVersion = troveTup[1]
                    newTrove, newVersion = trovesByNBF[nbf]
                name = nbf[0]
                flavor = nbf[2]

                skipped = []
                for badTroveTup in badTrove.iterBuiltTroves():
                    badNbf = (badTroveTup[0], targetBranch, badTroveTup[2])
                    # Only report package names, not components.
                    if not ':' in badTroveTup[0]:
                        skipped.append(badTroveTup[0])
                    if (badNbf in trovesByNBF
                            and badTrove is trovesByNBF[badNbf][0]):
                        del trovesByNBF[badNbf]

                skipped = '%s' % (', '.join(skipped))
                log.warning("Not committing %s on %s[%s]%s - overridden by"
                            " %s[%s]%s" % (skipped, badTroveTup[1],
                             badTroveTup[2], badTrove.getContextStr(),
                             newVersion, flavor, newTrove.getContextStr()))
                if trove is badTrove:
                    break

            trovesByNBF[nbf] = trove, troveTup[1]
            branchMap[branch] = targetBranch

    for nbf, (trove, tupVersion) in trovesByNBF.items():
        # Entries still on the rMake branch are the ones that need cloning.
        if tupVersion.branch() != nbf[1]:
            trovesToClone.append((nbf[0], tupVersion, nbf[2]))

    if not trovesToClone:
        if sourceOnly:
            err = 'Could not find sources to commit'
        elif alreadyCommitted:
            log.warning('All built troves have already been committed')
            return True, {}
        else:
            err = 'Can only commit built troves, none found'
        return False, err

    if sourcesToCheck and not commitOutdatedSources:
        outdated = _checkOutdatedSources(repos, sourcesToCheck)
        if outdated:
            outdated = ( '%s=%s (replaced by newer %s)' \
                         % (name, builtVer, newVer.trailingRevision())
                         for (name, builtVer, newVer) in outdated)
            err = ('The following source troves are out of date:\n%s\n\n'
                   'Use --commit-outdated-sources to commit anyway'
                   % '\n'.join(outdated))
            return False, err

    # only update build info if we'll be okay if some buildreqs are not
    # updated
    # FIX: was hardcoded updateBuildInfo=True, contradicting this comment and
    # the sibling implementation; ask compat whether partial cloning is safe.
    updateBuildInfo = compat.ConaryVersion().acceptsPartialBuildReqCloning()
    callback = callbacks.CloneCallback(conaryclient.cfg, message)
    passed, cs = conaryclient.createTargetedCloneChangeSet(
                                        branchMap, trovesToClone,
                                        updateBuildInfo=updateBuildInfo,
                                        cloneSources=False,
                                        trackClone=False,
                                        callback=callback, fullRecurse=False)
    if passed:
        oldTroves = []
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                oldTroves.append(troveCs.getOldNameVersionFlavor())
        if oldTroves:
            oldDict = {}
            for oldTrove in repos.getTroves(oldTroves):
                oldDict.setdefault(oldTrove.getNameVersionFlavor(),
                                   []).append(oldTrove)

        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                # Relative changeset: apply on top of the fetched old trove.
                trv = oldDict[troveCs.getOldNameVersionFlavor()].pop()
                trv.applyChangeSet(troveCs)
            else:
                trv = Trove(troveCs)

            for _, childVersion, _ in trv.iterTroveList(strongRefs=True,
                                                        weakRefs=True):
                # make sure there are not any references to the internal
                # rmake repository - that would be a bad bug - easy to
                # do with the way we do cooking of groups.
                onRepos = childVersion.getHost() == reposName
                assert not onRepos, "Trove %s references repository" % trv

            n,v,f = troveCs.getNewNameVersionFlavor()
            trove, troveVersion = trovesByNBF[n, v.branch(), f]
            troveNVFC = trove.getNameVersionFlavor(withContext=True)
            # map jobId -> trove -> binaries
            mapping[trove.jobId].setdefault(troveNVFC, []).append((n,v,f))
    else:
        return False, 'Creating clone failed'

    # FIX: signatureKey was fetched but never applied, so the changeset went
    # out unsigned; sign it like the sibling implementation does.
    signatureKey = conaryclient.cfg.signatureKey
    if signatureKey and compat.ConaryVersion().signAfterPromote():
        cs = signAbsoluteChangeset(cs, signatureKey)
    if writeToFile:
        cs.writeToFile(writeToFile)
    else:
        repos.commitChangeSet(cs, callback=callback)
    return True, mapping
def commitJobs(conaryclient, jobList, reposName, message=None,
               commitOutdatedSources=False, sourceOnly=False,
               excludeSpecs=None, writeToFile=None):
    """Commit the built troves from rMake jobs by cloning them from the
    internal rMake repository onto their targets' parent branches.

    @param conaryclient: conary client used for repository access and cloning
    @param jobList: iterable of rMake job objects to commit
    @param reposName: hostname of the internal rMake repository
    @param message: optional commit (clone) message
    @param commitOutdatedSources: commit even if newer sources exist upstream
    @param sourceOnly: commit only source troves, skip binaries
    @param excludeSpecs: trove specs (optionally with a 4th context element)
        to leave out of the commit
    @param writeToFile: if set, write the final changeset to this path
        instead of committing it
    @return: (False, errorMessage) on failure, or
        (True, {jobId: {troveNVFC: [(n, v, f), ...]}}) mapping each committed
        job/trove to the binaries it produced.
    """
    jobsToCommit = {}
    mapping = {}
    for job in jobList:
        # Key by jobId so the same job passed twice is committed once.
        if not job.isCommitted():
            jobsToCommit[job.jobId] = job
    jobsToCommit = jobsToCommit.values()  # dedup job list
    if not jobsToCommit:
        err = 'Job(s) already committed'
        return False, err

    allTroves = []
    alreadyCommitted = False
    for job in jobsToCommit:
        mapping[job.jobId] = {}
        for trove in job.iterTroves():
            allTroves.append(trove)
            troveVersion = trove.getVersion()
            if troveVersion.getHost() == reposName:
                # Troves cooked straight from the filesystem have no parent
                # branch to clone onto, so they cannot be committed.
                if not troveVersion.branch().hasParentBranch():
                    message = ('Cannot commit filesystem cook %s - '
                               ' nowhere to commit to!' % trove.getName())
                    return False, message
    assert (allTroves)

    source = trovesource.SimpleTroveSource()
    if excludeSpecs:
        # An exclude spec may carry a 4th element naming a build context;
        # remember which contexts each bare (n, v, f) spec applies to.
        excludeSpecsWithContext = {}
        troveMap = {}
        for excludeSpec in excludeSpecs:
            if len(excludeSpec) == 4:
                context = excludeSpec[3]
            else:
                context = None
            excludeSpecsWithContext.setdefault(excludeSpec[:3],
                                               []).append(context)
        excludeSpecs = [x[:3] for x in excludeSpecs]

        for trove in allTroves:
            # Index by package name (strip the component) for spec matching.
            troveTup = (trove.getName().split(':')[0],
                        trove.getVersion(),
                        trove.getFlavor())
            source.addTrove(*troveTup)
            troveMap.setdefault(troveTup, []).append(trove)
        source.searchAsDatabase()
        matches = source.findTroves(None, excludeSpecs, None,
                                    allowMissing=True)
        trvMatches = []
        for excludeSpec, matchList in matches.iteritems():
            contexts = excludeSpecsWithContext[excludeSpec]
            for match in matchList:
                for trv in troveMap[match]:
                    # A spec with no context (None) excludes every context.
                    if trv.context in contexts or None in contexts:
                        trvMatches.append(trv)

        allTroves = [x for x in allTroves if x not in trvMatches]
        if not allTroves:
            message = ('All troves excluded - not committing')
            return False, message

    repos = conaryclient.getRepos()

    trovesByNBF = {}       # (name, targetBranch, flavor) -> (buildTrove, version)
    sourcesToCheck = []
    branchMap = {}         # rMake branch -> target (parent) branch
    trovesToClone = []
    for trove in allTroves:
        builtTroves = list(trove.iterBuiltTroves())
        if not builtTroves:
            continue
        if builtTroves[0][1].getHost() != reposName:
            # Built troves already live outside the rMake repository, so
            # this job was committed previously.
            alreadyCommitted = True
            for n, v, f in builtTroves:
                trovesByNBF[n, v.branch(), f] = (trove, v)
            continue

        troveVersion = trove.getVersion()
        if troveVersion.getHost() == reposName:
            sourceTup = (trove.getName(), troveVersion, Flavor())
            targetBranch = troveVersion.branch().parentBranch()
            branchMap[troveVersion.branch()] = targetBranch
            nbf = trove.getName(), targetBranch, Flavor()
            if nbf in trovesByNBF:
                # Two jobs may build the same source only at the same version.
                if trovesByNBF[nbf][1] != troveVersion:
                    badVersion = trovesByNBF[nbf][1]
                    return False, (
                        "Cannot commit two different versions of source component %s:"
                        " %s and %s" % (trove.getName(), troveVersion,
                                        badVersion))
            trovesByNBF[nbf] = trove, troveVersion
            sourcesToCheck.append(sourceTup)
        if sourceOnly:
            continue

        for troveTup in builtTroves:
            branch = troveTup[1].branch()
            targetBranch = branch.parentBranch()
            # add mapping so that when the cloning is done
            # we can tell what commit resulted in what binaries.
            nbf = (troveTup[0], targetBranch, troveTup[2])
            if nbf in trovesByNBF:
                otherBinary = trovesByNBF[nbf][0].getBinaryTroves()[0]
                if otherBinary[1].branch() == targetBranch:
                    # this one's already committed.
                    break
                # discard the later of the two commits.
                if trovesByNBF[nbf][0].getVersion() > trove.getVersion():
                    # we're the earlier one
                    badTrove, badVersion = trovesByNBF[nbf]
                    newTrove = trove
                    newVersion = troveTup[1]
                else:
                    badTrove = trove
                    badVersion = troveTup[1]
                    newTrove, newVersion = trovesByNBF[nbf]
                name = nbf[0]
                flavor = nbf[2]

                skipped = []
                for badTroveTup in badTrove.iterBuiltTroves():
                    badNbf = (badTroveTup[0], targetBranch, badTroveTup[2])
                    # Only report package names, not components.
                    if not ':' in badTroveTup[0]:
                        skipped.append(badTroveTup[0])
                    if badNbf in trovesByNBF and badTrove is trovesByNBF[
                            badNbf][0]:
                        del trovesByNBF[badNbf]

                skipped = '%s' % (', '.join(skipped))
                log.warning("Not committing %s on %s[%s]%s - overridden by"
                            " %s[%s]%s" % (skipped, badTroveTup[1],
                             badTroveTup[2], badTrove.getContextStr(),
                             newVersion, flavor, newTrove.getContextStr()))
                if trove is badTrove:
                    break

            trovesByNBF[nbf] = trove, troveTup[1]
            branchMap[branch] = targetBranch

    for nbf, (trove, tupVersion) in trovesByNBF.items():
        # Entries still on the rMake branch are the ones that need cloning.
        if tupVersion.branch() != nbf[1]:
            trovesToClone.append((nbf[0], tupVersion, nbf[2]))

    if not trovesToClone:
        if sourceOnly:
            err = 'Could not find sources to commit'
        elif alreadyCommitted:
            log.warning('All built troves have already been committed')
            return True, {}
        else:
            err = 'Can only commit built troves, none found'
        return False, err

    if sourcesToCheck and not commitOutdatedSources:
        outdated = _checkOutdatedSources(repos, sourcesToCheck)
        if outdated:
            outdated = ( '%s=%s (replaced by newer %s)' \
                         % (name, builtVer, newVer.trailingRevision())
                         for (name, builtVer, newVer) in outdated)
            err = ('The following source troves are out of date:\n%s\n\n'
                   'Use --commit-outdated-sources to commit anyway'
                   % '\n'.join(outdated))
            return False, err

    # only update build info if we'll be okay if some buildreqs are not
    # updated
    updateBuildInfo = compat.ConaryVersion().acceptsPartialBuildReqCloning()
    callback = callbacks.CloneCallback(conaryclient.cfg, message)
    passed, cs = conaryclient.createTargetedCloneChangeSet(
        branchMap, trovesToClone,
        updateBuildInfo=updateBuildInfo,
        cloneSources=False, trackClone=False,
        callback=callback, fullRecurse=False)
    if passed:
        oldTroves = []
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                oldTroves.append(troveCs.getOldNameVersionFlavor())
        if oldTroves:
            oldDict = {}
            for oldTrove in repos.getTroves(oldTroves):
                oldDict.setdefault(oldTrove.getNameVersionFlavor(),
                                   []).append(oldTrove)

        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                # Relative changeset: apply on top of the fetched old trove.
                trv = oldDict[troveCs.getOldNameVersionFlavor()].pop()
                trv.applyChangeSet(troveCs)
            else:
                trv = Trove(troveCs)

            for _, childVersion, _ in trv.iterTroveList(strongRefs=True,
                                                        weakRefs=True):
                # make sure there are not any references to the internal
                # rmake repository - that would be a bad bug - easy to
                # do with the way we do cooking of groups.
                onRepos = childVersion.getHost() == reposName
                assert not onRepos, "Trove %s references repository" % trv

            n, v, f = troveCs.getNewNameVersionFlavor()
            trove, troveVersion = trovesByNBF[n, v.branch(), f]
            troveNVFC = trove.getNameVersionFlavor(withContext=True)
            # map jobId -> trove -> binaries
            mapping[trove.jobId].setdefault(troveNVFC, []).append((n, v, f))
    else:
        return False, 'Creating clone failed'

    signatureKey = conaryclient.cfg.signatureKey
    if signatureKey and compat.ConaryVersion().signAfterPromote():
        # FIX: the signed changeset was previously bound to an unused
        # 'finalCs' variable and the UNSIGNED cs was committed/written,
        # silently discarding the signature.  Rebind cs so the signed
        # changeset is the one that goes out.
        cs = signAbsoluteChangeset(cs, signatureKey)
    if writeToFile:
        cs.writeToFile(writeToFile)
    else:
        repos.commitChangeSet(cs, callback=callback)
    return True, mapping