Code Example #1
File: dephandler.py  Project: pombreda/rmake
    def _logDifferenceInPrebuiltReqs(self, trv, buildReqTups, preBuiltReqs):
        existsTrv = trove.Trove('@update',
                                versions.NewVersion(),
                                deps.Flavor(), None)
        availableTrv = trove.Trove('@update',
                                   versions.NewVersion(),
                                   deps.Flavor(), None)
        for troveNVF in preBuiltReqs:
            existsTrv.addTrove(*troveNVF)
        for troveNVF in buildReqTups:
            availableTrv.addTrove(*troveNVF)
        jobs = availableTrv.diff(existsTrv)[2]
        formatter = display.JobTupFormatter(affinityDb=None)
        formatter.dcfg.setTroveDisplay(fullVersions=True,
                                       fullFlavors=True,
                                       showComponents=True)
        formatter.dcfg.setJobDisplay(compressJobs=True)
        formatter.prepareJobLists([jobs])
        self.logger.info('Could count %s=%s[%s]{%s} as prebuilt - the'
                         ' following changes have been made in its'
                         ' buildreqs:' % trv.getNameVersionFlavor(
                                                            withContext=True))
        for line in formatter.formatJobTups(jobs):
            self.logger.info(line)
        self.logger.info('...Rebuilding')
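The example above is the core pattern these snippets share: two throwaway troves are created with versions.NewVersion() purely as containers, populated with (name, version, flavor) tuples, and then diffed to obtain a job list. A minimal sketch of that pattern, using only the conary calls shown above; the helper name diffTroveSets is hypothetical:

from conary import trove, versions
from conary.deps import deps

def diffTroveSets(oldTups, newTups):
    # oldTups / newTups are iterables of (name, version, flavor) tuples,
    # e.g. collected from repository queries or Trove.getNameVersionFlavor().
    existsTrv = trove.Trove('@update', versions.NewVersion(),
                            deps.Flavor(), None)
    availableTrv = trove.Trove('@update', versions.NewVersion(),
                               deps.Flavor(), None)
    for troveNVF in oldTups:
        existsTrv.addTrove(*troveNVF)
    for troveNVF in newTups:
        availableTrv.addTrove(*troveNVF)
    # Index 2 of diff()'s return value is the job list describing how to
    # get from the "exists" set to the "available" set.
    return availableTrv.diff(existsTrv)[2]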
Code Example #2
File: buildcmd.py  Project: pombreda/rmake
def _getLocalCook(conaryclient, cfg, recipePath, message):
    if not hasattr(cook, 'getRecipeInfoFromPath'):
        raise errors.RmakeError('Local cooks require at least conary 1.0.19')
    recipeDir = os.path.dirname(recipePath)

    # We do not want to sign commits to the local repository; doing so
    # would require that we manage keys in this repository as well.
    oldKey = cfg.signatureKey
    oldMap = cfg.signatureKeyMap
    oldInteractive = cfg.interactive
    oldWorkDir = os.getcwd()
    try:
        cfg.signatureKey = None
        cfg.signatureKeyMap = {}
        cfg.interactive = False

        if os.access(recipeDir + '/CONARY', os.R_OK):
            os.chdir(recipeDir)
            stateFile = state.ConaryStateFromFile(recipeDir + '/CONARY')
            if stateFile.hasSourceState():
                stateFile = stateFile.getSourceState()
                if stateFile.getVersion() != versions.NewVersion():
                    return _shadowAndCommit(conaryclient, cfg, recipeDir, 
                                            stateFile, message)
                else:
                    return _commitRecipe(conaryclient, cfg, recipePath, message,
                                         branch=stateFile.getBranch())
        return _commitRecipe(conaryclient, cfg, recipePath, message)
    finally:
        cfg.signatureKey = oldKey
        cfg.signatureKeyMap = oldMap
        cfg.interactive = oldInteractive
        os.chdir(oldWorkDir)
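The body of _getLocalCook is dominated by a save/mutate/restore pattern around cfg.signatureKey, cfg.signatureKeyMap, cfg.interactive and the working directory. A hedged sketch of the same idea packaged as a context manager; the name unsignedLocalCommit is hypothetical, and only the cfg attribute names come from the example above:

import os
from contextlib import contextmanager

@contextmanager
def unsignedLocalCommit(cfg):
    # Temporarily disable commit signing and interactive prompts; restore
    # the previous settings and working directory even if the body raises.
    oldKey = cfg.signatureKey
    oldMap = cfg.signatureKeyMap
    oldInteractive = cfg.interactive
    oldWorkDir = os.getcwd()
    try:
        cfg.signatureKey = None
        cfg.signatureKeyMap = {}
        cfg.interactive = False
        yield cfg
    finally:
        cfg.signatureKey = oldKey
        cfg.signatureKeyMap = oldMap
        cfg.interactive = oldInteractive
        os.chdir(oldWorkDir)

# Hypothetical usage:
#     with unsignedLocalCommit(cfg):
#         os.chdir(recipeDir)
#         ...shadow or commit the recipe to the local repository...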
Code Example #3
    def buildAfter(self, troveCache):
        after = trove.Trove("@tsupdate", versions.NewVersion(), deps.Flavor())

        # store the mapping of what changed for explicit troves; we peek
        # at this for CM simplification
        explicitTups = set()

        afterInfo = {}
        updateNames = set()
        for troveTup, inInstallSet, explicit in \
                  self.updateTroveSet._walk(troveCache, recurse = True):
            after.addTrove(troveTup[0], troveTup[1], troveTup[2])
            afterInfo[troveTup] = (inInstallSet, explicit)
            updateNames.add(troveTup[0])
            if explicit:
                explicitTups.add(troveTup)

        return after, afterInfo, updateNames, explicitTups
Code Example #4
    def buildBefore(self, troveCache, updateNames, installOverrides={}):
        before = trove.Trove("@tsupdate", versions.NewVersion(), deps.Flavor())

        beforeInfo = {}
        installSet = set()
        optionalSet = set()
        for troveTup, inInstallSet, explicit in \
                  self.primaryTroveSet._walk(troveCache, recurse = True,
                                             installSetOverrides =
                                                installOverrides):
            if troveTup[0] in updateNames:
                before.addTrove(troveTup[0], troveTup[1], troveTup[2])
                beforeInfo[troveTup] = (inInstallSet, explicit)
            elif explicit:
                if inInstallSet:
                    installSet.add(troveTup)
                else:
                    optionalSet.add(troveTup)

        return before, beforeInfo, installSet, optionalSet
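buildAfter() and buildBefore() both consume the (troveTup, inInstallSet, explicit) triples yielded by _walk() and differ only in how they partition them. A small sketch of the bookkeeping buildAfter() produces, with plain strings standing in for real conary version and flavor objects (all names and versions below are made up):

walkResults = [
    # (troveTup,                       inInstallSet, explicit)
    (('group-os', '1.0-1-1', ''),      True,         True),
    (('foo:runtime', '1.0-1-1', ''),   True,         False),
    (('bar:runtime', '0.9-1-1', ''),   False,        True),
]

afterInfo = {}
updateNames = set()
explicitTups = set()
for troveTup, inInstallSet, explicit in walkResults:
    afterInfo[troveTup] = (inInstallSet, explicit)
    updateNames.add(troveTup[0])        # names drive buildBefore()'s filter
    if explicit:
        explicitTups.add(troveTup)

# buildBefore() then walks the primary trove set: tuples whose name is in
# updateNames go into the "before" trove, everything else is recorded in
# installSet or optionalSet depending on inInstallSet.
print(sorted(updateNames))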
Code Example #5
File: commit.py  Project: pombreda/rmake
def updateRecipes(repos, cfg, recipeList, committedSources):
    committedSourcesByNB = {}
    for name, version, flavor in committedSources:
        committedSourcesByNB[name, version.branch()] = version
    for recipe in recipeList:
        recipeDir = os.path.dirname(recipe)
        stateFilePath = recipeDir + '/CONARY'
        if not os.path.exists(stateFilePath):
            continue
        conaryStateFile = state.ConaryStateFromFile(stateFilePath)
        if not conaryStateFile.hasSourceState():
            continue
        context = conaryStateFile.getContext()
        stateFile = conaryStateFile.getSourceState()
        troveName = stateFile.getName()
        branch = stateFile.getBranch()
        if (troveName, branch) not in committedSourcesByNB:
            continue
        stateVersion = stateFile.getVersion()
        newVersion = committedSourcesByNB[troveName, branch]
        if stateVersion != versions.NewVersion():
            log.info('Updating %s after commit' % recipeDir)
            if compat.ConaryVersion().updateSrcTakesMultipleVersions():
                try:
                    # Added in CNY-3035
                    checkin.nologUpdateSrc(repos, [recipeDir])
                except checkin.builderrors.UpToDate:
                    pass  # Don't mention if the source is already up to date
                except checkin.builderrors.CheckinError, e:
                    e.logError()
                except AttributeError:
                    checkin.updateSrc(repos, [recipeDir])
            else:
                curDir = os.getcwd()
                try:
                    os.chdir(recipeDir)
                    checkin.updateSrc(repos)
                finally:
                    os.chdir(curDir)
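updateRecipes() only touches a checked-out recipe when its (name, branch) pair was part of the commit, which is why committedSources is first indexed by (name, branch). A toy sketch of that index with strings in place of conary Version objects (version.branch() is replaced by a string split purely for illustration):

committedSources = [
    ('foo:source', '/localhost@rpl:linux/1.1-1', ''),
    ('bar:source', '/localhost@rpl:linux/2.0-1', ''),
]

def branchOf(versionString):
    # Real code calls version.branch(); here we just drop the revision.
    return versionString.rsplit('/', 1)[0]

committedSourcesByNB = {}
for name, version, flavor in committedSources:
    committedSourcesByNB[name, branchOf(version)] = version

# A recipe whose CONARY state file names ('foo:source', that branch) gets
# updated to the newly committed version; everything else is skipped.
print(committedSourcesByNB[('foo:source', '/localhost@rpl:linux')])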
Code Example #6
    def patchAction(self, data):
        before = trove.Trove("@tsupdate", versions.NewVersion(), deps.Flavor())

        after, afterInfo, updateNames, explicitTups = \
                    self.buildAfter(data.troveCache)
        before, beforeInfo, installSet, optionalSet = \
                    self.buildBefore(data.troveCache, updateNames)

        troveMapping = after.diff(before)[2]

        # populate the cache with timestamped versions as a bulk operation
        data.troveCache.getTimestamps(beforeInfo.keys() + afterInfo.keys())

        for (trvName, (oldVersion, oldFlavor), (newVersion, newFlavor),
             isAbsolute) in troveMapping:
            oldTuple = (trvName, oldVersion, oldFlavor)
            newTuple = (trvName, newVersion, newFlavor)
            self._handleTrove(data, beforeInfo, afterInfo, oldTuple, newTuple,
                              installSet, optionalSet)

        self.outSet._setInstall(installSet)
        self.outSet._setOptional(optionalSet)

        return True
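patchAction() iterates the job tuples produced by Trove.diff(): each entry has the shape (name, (oldVersion, oldFlavor), (newVersion, newFlavor), isAbsolute), where a None old version marks a fresh install and a None new version marks an erase (the same convention the _updateJob helper in Code Example #8 relies on). A minimal sketch of reading that structure, with illustrative strings instead of real version and flavor objects:

troveMapping = [
    ('foo:runtime', ('1.0', ''), ('1.1', ''), False),    # update 1.0 -> 1.1
    ('bar:runtime', (None, None), ('2.0', ''), True),    # fresh install
    ('baz:runtime', ('0.9', ''), (None, None), False),   # erase
]

for (trvName, (oldVersion, oldFlavor), (newVersion, newFlavor),
     isAbsolute) in troveMapping:
    if oldVersion is None:
        print('install %s=%s' % (trvName, newVersion))
    elif newVersion is None:
        print('erase   %s=%s' % (trvName, oldVersion))
    else:
        print('update  %s: %s -> %s' % (trvName, oldVersion, newVersion))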
Code Example #7
    def updateAction(self, data):
        # figure out which updates are from explicitly named troves in the
        # update set
        after = trove.Trove("@tsupdateouter", versions.NewVersion(),
                            deps.Flavor())
        before = trove.Trove("@tsupdateouter", versions.NewVersion(),
                             deps.Flavor())
        names = set()
        for troveTup, inInstallSet, explicit in \
                  self.updateTroveSet._walk(data.troveCache, recurse = False):
            assert (inInstallSet)
            if explicit:
                after.addTrove(*troveTup)
                names.add(troveTup[0])

        beforeIncluded = {}
        for troveTup, inInstallSet, explicit in \
                  self.primaryTroveSet._walk(data.troveCache, recurse = True):
            if troveTup[0] in names:
                before.addTrove(*troveTup)
                beforeIncluded[troveTup] = inInstallSet

        troveMapping = after.diff(before)[2]
        self._completeMapping(troveMapping, before, after)
        del before, after, names

        # this doesn't really belong here, but we need this information
        # for old troves only on update
        data.troveCache.cacheTroves([
            (name, ) + oldInfo for (name, oldInfo, newInfo, _) in troveMapping
            if oldInfo[0] and newInfo[0]
        ])

        installOverrides = {}
        for (name, oldInfo, newInfo, absolute) in troveMapping:
            if oldInfo[0] is None or newInfo[0] is None:
                continue

            oldTuple = (name, ) + oldInfo
            if beforeIncluded[oldTuple]:
                continue

            installOverrides[oldTuple] = True

            for subTroveTup, subIsInstall, subIsExplicit in \
                            data.troveCache.iterTroveListInfo(oldTuple):
                installOverrides[subTroveTup] = (installOverrides.get(
                    subTroveTup, False) or subIsInstall)

        after, afterInfo, updateNames, explicitTups = \
                    self.buildAfter(data.troveCache)
        before, beforeInfo, installSet, optionalSet = \
                    self.buildBefore(data.troveCache, updateNames,
                                     installOverrides = installOverrides)

        troveMapping = after.diff(before)[2]
        self._completeMapping(troveMapping, before, after)

        self.outSet.updateMap = {}
        for (trvName, (oldVersion, oldFlavor), (newVersion, newFlavor),
             isAbsolute) in troveMapping:
            oldTuple = (trvName, oldVersion, oldFlavor)
            newTuple = (trvName, newVersion, newFlavor)
            self._handleTrove(data, beforeInfo, afterInfo, oldTuple, newTuple,
                              installSet, optionalSet)

            if newTuple in explicitTups:
                if oldTuple[1] is not None and oldTuple != newTuple:
                    self.outSet.updateMap[newTuple] = oldTuple
                else:
                    self.outSet.updateMap[newTuple] = None

        self.outSet._setInstall(installSet)
        self.outSet._setOptional(optionalSet)

        return True
Code Example #8
    def _updateFromTroveSetGraph(self,
                                 uJob,
                                 troveSet,
                                 troveCache,
                                 split=True,
                                 fromChangesets=[],
                                 criticalUpdateInfo=None,
                                 applyCriticalOnly=False,
                                 restartInfo=None,
                                 callback=None,
                                 ignoreMissingDeps=False):
        """
        Populates an update job based on a set of trove update and erase
        operations. If self.cfg.autoResolve is set, dependencies
        within the job are automatically closed. Returns a mapping with
        suggestions for possible dependency resolutions.

        @param uJob: A L{conary.local.database.UpdateJob} object
        @type uJob: L{conary.local.database.UpdateJob}
        @param split: Split large update operations into separate jobs. This
                      must be True if you intend to install the job (False
                      breaks the handling of users and groups, which requires
                      info- packages to be installed first and in separate
                      jobs). We allow False because these calculations are
                      unnecessary when the jobs are being constructed for
                      other reasons.
        @type split: bool
        @param fromChangesets: When specified, this list of
        L{changeset.ChangeSetFromFile} objects is used as the source of troves,
        instead of the repository.
        @type fromChangesets: list
        @param criticalUpdateInfo: Settings and data needed for critical
        updates
        @type criticalUpdateInfo: L{CriticalUpdateInfo}
        @param applyCriticalOnly: apply only the critical update.
        @type applyCriticalOnly: bool
        @param restartInfo: If specified, overrides itemList. It specifies the
        location where the rest of an update job run was stored (after
        applying the critical update).
        @type restartInfo: string
        @param ignoreMissingDeps: Do not raise DepResolutionFailure on
        unresolved dependencies
        @type ignoreMissingDeps: bool
        @rtype: dict

        @raise ConaryError: if a C{sync} operation was requested, and
            relative changesets were specified.

        @raise DepResolutionFailure: could not resolve dependencies

        @raise InternalConaryError: if a jobset was inconsistent.

        @raise UpdateError: Generic update error.

        @raise MissingTrovesError: if one of the requested troves could not
            be found.

        @raise other: Callbacks may generate exceptions on their own. See
            L{update.ClientUpdate.applyUpdateJob} for an explanation of
            the behavior of exceptions within callbacks.
        """
        def _updateJob(job, addedTroves):
            for newTup in addedTroves:
                # First look for an exact erase or update in the old job that
                # would remove this trove
                erases = [x for x in job if (x[0], x[1][0], x[1][1]) == newTup]
                if erases:
                    if len(erases) > 1 or erases[0][2][0] is not None:
                        # Corner case, fall back to doing a full diff
                        return False
                    # We're adding back a trove that the job would have erased
                    # and the two annihilate each other
                    job.remove(erases[0])
                    continue

                # Then look for a name-only match against an erase
                erases = [
                    x for x in job if x[0] == newTup[0] and x[2][0] is None
                ]
                if erases:
                    if len(erases) > 1:
                        # Corner case
                        return False
                    # Convert this erasure into an update
                    job.remove(erases[0])
                    oldVF = erases[0][1]
                    job.append((newTup[0], oldVF, newTup[1:3], False))
                    continue

                # No match, it's a new install
                job.append((newTup[0], (None, None), newTup[1:3], True))
            return True

        if criticalUpdateInfo is None:
            criticalUpdateInfo = update.CriticalUpdateInfo()

        searchPath = troveSet.searchPath

        if callback:
            callback.executingSystemModel()

        #depSearch = CMLSearchPath([ preFetch, searchPath ],
        #graph = preFetch.g)
        depSearch = searchPath
        troveSet.g.realize(
            CMLActionData(troveCache, self.cfg.flavor[0], self.repos,
                          self.cfg))

        existsTrv = trove.Trove("@model", versions.NewVersion(), deps.Flavor(),
                                None)
        targetTrv = trove.Trove("@model", versions.NewVersion(), deps.Flavor(),
                                None)

        pins = set()
        phantomsByName = {}
        for tup, pinned in self.db.iterAllTroves(withPins=True):
            existsTrv.addTrove(*tup)
            if pinned:
                pins.add(tup)
                targetTrv.addTrove(*tup)
            if tup[1].onPhantomLabel():
                phantomsByName.setdefault(tup[0], set()).add(tup)

        for tup, inInstall, explicit in \
                                troveSet._walk(troveCache, recurse = True):
            if inInstall and tup[0:3] not in pins:
                targetTrv.addTrove(*tup[0:3])

        self._closePackages(troveCache, targetTrv)

        if phantomsByName and self.cfg.syncCapsuleDatabase == 'update':
            # Allow phantom troves to be updated to a real trove, but preserve
            # ones that would be erased outright.
            for tup in targetTrv.iterTroveList(strongRefs=True):
                name = tup[0]
                if existsTrv.hasTrove(*tup):
                    # This particular trove is not replacing anything
                    continue
                if name in phantomsByName:
                    # Could be replacing a phantom trove, so keep the latter in
                    # the old set so it will be updated.
                    del phantomsByName[name]
            # Discard any unmatched phantom troves from the old set so that
            # they will be left alone.
            for tups in phantomsByName.itervalues():
                for tup in tups:
                    existsTrv.delTrove(missingOkay=False, *tup)

        job = targetTrv.diff(existsTrv, absolute=False)[2]

        if callback:
            callback.resolvingDependencies()

        # don't resolve against local troves (we can do this because either
        # they're installed and show up in the unresolveable list or they
        # aren't installed and we don't know about them) or troves which are
        # in the install set (since they're already in the install set,
        # adding them to the install set won't help)
        depDb = deptable.DependencyDatabase()
        depResolveSource = depSearch._getResolveSource(
            depDb=depDb,
            filterFn=lambda n, v, f:
            (v.isOnLocalHost() or targetTrv.isStrongReference(n, v, f)))
        resolveMethod = depResolveSource.getResolveMethod()

        uJob.setSearchSource(self.getSearchSource())
        # this is awful
        jobTroveSource = uJob.getTroveSource()
        jobTroveSource.addChangeSets(fromChangesets, includesFileContents=True)

        pathHashCache = {}

        resolveDeps = split = True
        if resolveDeps or split:
            check = self.db.getDepStateClass(
                troveCache,
                findOrdering=split,
                ignoreDepClasses=self.cfg.ignoreDependencies)

            linkedJobs = self._findOverlappingJobs(job,
                                                   troveCache,
                                                   pathHashCache=pathHashCache)

            criticalJobs = criticalUpdateInfo.findCriticalJobs(job)
            finalJobs = criticalUpdateInfo.findFinalJobs(job)
            criticalOnly = criticalUpdateInfo.isCriticalOnlyUpdate()

            log.info("resolving dependencies")
            result = check.depCheck(job,
                                    linkedJobs=linkedJobs,
                                    criticalJobs=criticalJobs,
                                    finalJobs=finalJobs,
                                    criticalOnly=criticalOnly)

            suggMap = {}
            while True:
                added = set()
                for (needingTup, neededDeps, neededTupList) in \
                                                result.unresolveableList:
                    for neededTup in neededTupList:
                        if (neededTup not in added
                                and searchPath.hasOptionalTrove(neededTup)):
                            log.info("keeping installed trove for deps %s",
                                     neededTup)
                            added.add(neededTup)

                if not added:
                    unsatisfied = result.unsatisfiedList
                    unsatisfied += [x[0:2] for x in result.unresolveableList]

                    while (resolveMethod.prepareForResolution(unsatisfied)
                           and not added):
                        sugg = resolveMethod.resolveDependencies()
                        newJob = resolveMethod.filterSuggestions(
                            result.unsatisfiedList, sugg, suggMap)
                        for (name, oldInfo, newInfo, isAbsolute) in newJob:
                            assert (isAbsolute)
                            log.info("adding for dependency %s", name)
                            added.add((name, newInfo[0], newInfo[1]))

                if not added:
                    break

                for troveTup in added:
                    targetTrv.addTrove(*troveTup)

                added.update(
                    self._closePackages(troveCache, targetTrv,
                                        newTroves=added))

                # try to avoid a diff here
                if not _updateJob(job, added):
                    job = targetTrv.diff(existsTrv, absolute=False)[2]

                log.info("resolving dependencies (job length %d)", len(job))
                criticalJobs = criticalUpdateInfo.findCriticalJobs(job)
                finalJobs = criticalUpdateInfo.findFinalJobs(job)
                criticalOnly = criticalUpdateInfo.isCriticalOnlyUpdate()

                linkedJobs = self._findOverlappingJobs(
                    job, troveCache, pathHashCache=pathHashCache)

                result = check.depCheck(job,
                                        linkedJobs=linkedJobs,
                                        criticalJobs=criticalJobs,
                                        finalJobs=finalJobs,
                                        criticalOnly=criticalOnly)

            check.done()
            log.info("job dependency closed; %s jobs resulted", len(job))

            # if any of the things we're about to install or remove use
            # capsules we cannot split the job
            if not split:
                splitJob = [job]
                criticalUpdates = []
            else:
                splitJob = result.getChangeSetList()
                criticalUpdates = [
                    splitJob[x] for x in result.getCriticalUpdates()
                ]

            if result.unsatisfiedList and (not ignoreMissingDeps):
                raise update.DepResolutionFailure(self.cfg,
                                                  result.unsatisfiedList,
                                                  suggMap,
                                                  result.unresolveableList,
                                                  splitJob, criticalUpdates)
            elif result.unresolveableList and (not ignoreMissingDeps):
                # this can't happen because dep resolution empties
                # the unresolveableList into the unsatisfiedList to try
                # and find matches
                assert (0)
        else:
            (depList, suggMap, cannotResolve, splitJob, keepList,
             criticalUpdates) = ([], {}, [], [job], [], [])

        # this prevents us from using the changesetList as a searchSource
        log.info("processing job list")
        self._processCMLJobList(job, uJob, troveCache)
        log.info("gathering group defined path conflicts")
        self._findAcceptablePathConflicts(job, uJob, troveCache)
        log.info("combining jobs")
        self._combineJobs(uJob, splitJob, criticalUpdates)
        uJob.reorderPreScripts(criticalUpdateInfo)
        uJob.setTransactionCounter(self.db.getTransactionCounter())

        # remove things from the suggMap which are in the already installed
        # set
        for neededSet in suggMap.itervalues():
            for troveTup in set(neededSet):
                if existsTrv.hasTrove(*troveTup):
                    neededSet.remove(troveTup)

        for needingTroveTup, neededSet in suggMap.items():
            if not neededSet:
                del suggMap[needingTroveTup]

        if callback:
            callback.done()

        return suggMap
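The nested _updateJob() helper in the example above splices freshly resolved troves into an existing job list so that a full Trove.diff() can often be skipped: an added trove either cancels a matching erase, converts a name-only erase into an update, or is appended as a new install, and the helper returns False in the ambiguous corner cases so the caller falls back to a real diff. A self-contained sketch of that merging logic, with plain strings standing in for conary version and flavor objects:

def updateJob(job, addedTroves):
    # Returns False when a corner case forces the caller to redo a full diff.
    for newTup in addedTroves:
        # Look for an erase or update of exactly this (name, version, flavor).
        erases = [x for x in job if (x[0], x[1][0], x[1][1]) == newTup]
        if erases:
            if len(erases) > 1 or erases[0][2][0] is not None:
                return False          # corner case: let the caller re-diff
            job.remove(erases[0])     # the erase and the re-add cancel out
            continue
        # A name-only match against an erase becomes an update.
        erases = [x for x in job if x[0] == newTup[0] and x[2][0] is None]
        if erases:
            if len(erases) > 1:
                return False
            oldVF = erases[0][1]
            job.remove(erases[0])
            job.append((newTup[0], oldVF, newTup[1:3], False))
            continue
        # No match at all: a brand-new install.
        job.append((newTup[0], (None, None), newTup[1:3], True))
    return True

# Illustrative job list: erase foo:runtime 1.0, then the resolver adds it back.
job = [('foo:runtime', ('1.0', ''), (None, None), False)]
assert updateJob(job, [('foo:runtime', '1.0', '')])
print(job)    # []  -- the erase and the re-add annihilated each other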