Example #1
 def _groupsort(a, b):
     ret = cmp(a[0][0], b[0][0])
     if ret:
         return ret
     # if they have the same mark, sort the groups at the end
     ahasgrp = [x[1][1] for x in a if trove.troveIsGroup(x[1][0])]
     bhasgrp = [x[1][1] for x in b if trove.troveIsGroup(x[1][0])]
     if len(ahasgrp) > len(bhasgrp):
         return 1
     if len(bhasgrp) > len(ahasgrp):
         return -1
     return cmp(ahasgrp, bhasgrp)
Example #2
def recurseTrove(sourceRepos,
                 name,
                 version,
                 flavor,
                 callback=ChangesetCallback()):
    global recursedGroups
    assert (trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet([(name, (None, None),
                                            (version, flavor), True)],
                                          withFiles=False,
                                          withFileContents=False,
                                          recurse=False,
                                          callback=callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
Example #3
    def _split():
        # Stop adding troves to this job and allow its troves to be used for
        # the next job's relative changesets.
        for mark, job in jobList[-1]:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]

            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})

            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                flavorList = d[oldVersion]
                flavorList.discard(oldFlavor)
                if not flavorList:
                    del d[oldVersion]

            flavorList = d.setdefault(newVersion, set())
            flavorList.add(newFlavor)
        if jobList[-1]:
            jobList.append([])
Example #4
    def search(self, request, hostname):
        name = request.GET.get('name', None)
        label = request.GET.get('label', None)
        latest = request.GET.get('latest', False)
        searchType = request.GET.get('type', None)
        checkFnDict  = {'group'  : trove.troveIsGroup,
                        'source' : trove.troveIsSourceComponent,
                        'fileset' : trove.troveIsFileSet,
                        'package' : lambda x: (trove.troveIsCollection(x) 
                                               and not trove.troveIsGroup(x)),
                        None: None}
        if searchType not in checkFnDict:
            # XXX We probably want to use exceptions instead of direct maps to
            # an error code
            #raise errors.InvalidSearchType(searchType)
            return response.Response('Invalid search type %s' % searchType,
                                     status = 400)

        checkFn = checkFnDict[searchType]
        repos = self.getRepos()
        if latest:
            queryFn = repos.getTroveLatestByLabel
        else:
            queryFn = repos.getTroveVersionsByLabel

        if not label:
            return response.Response('Label not specified', status = 400)

        try:
            label = versions.Label(label)
        except versions.ParseError, e:
            return response.Response(
                'Error parsing label %s: %s' % (label, e), status = 400)
Example #5
def rdiffCommand(cfg, client, db, diffSpec, **kw):
    troveSpec = cmdline.parseChangeList(diffSpec)[0]

    if troveSpec[1][0] is None:
        # Most likely, syntax did not specify <old>--<new>
        return -1

    kw.setdefault('recurse', None)
    kw.setdefault('asDiff', False)

    if kw['recurse'] is None:
        kw['recurse'] = (trove.troveIsCollection(troveSpec[0][0])
                         and not trove.troveIsGroup(troveSpec[0][0]))

    primaryCsList = cscmd.computeTroveList(client, [troveSpec])
    if (primaryCsList[0][1] == primaryCsList[0][2]):
        # Diffing against ourselves
        print "Identical troves"
        return 1

    cs = client.createChangeSet(primaryCsList,
                                withFiles=True,
                                withFileContents=kw['asDiff'],
                                recurse=kw['recurse'])
    showchangeset.displayChangeSet(db, cs, None, cfg, **kw)
Example #6
    def _fetch(self, actionList, data):
        troveTuples = set()

        for action in actionList:
            troveTuples.update(
                troveTup
                for troveTup, inInstall, isExplicit in action.primaryTroveSet.
                _walk(data.troveCache, newGroups=False, recurse=True)
                if (inInstall and (
                    trove.troveIsGroup(troveTup[0]) or isExplicit)))

        data.troveCache.getTroves(troveTuples, withFiles=False)
Example #7
    def _fetch(self, actionList, data):
        troveTuples = set()

        for action in actionList:
            troveTuples.update(troveTup for troveTup, inInstall, isExplicit in
                                 action.primaryTroveSet._walk(data.troveCache,
                                                 newGroups = False,
                                                 recurse = True)
                            if (inInstall and
                                (trove.troveIsGroup(troveTup[0]) or isExplicit)
                               ) )

        data.troveCache.getTroves(troveTuples, withFiles = False)
Example #8
    def _findAcceptablePathConflicts(self, jobList, uJob, troveCache):
        troveInfoNeeded = []
        for job in jobList:
            if trove.troveIsGroup(job[0]) and job[2][0] is not None:
                troveInfoNeeded.append((job[0],) + job[2])

        troveInfo = troveCache.getTroveInfo(
                        trove._TROVEINFO_TAG_PATHCONFLICTS, troveInfoNeeded)
        troveInfoDict = dict( (tt, ti) for (tt, ti) in
                                itertools.izip(troveInfoNeeded, troveInfo) )

        uJob.setAllowedPathConflicts(
            self.db.buildPathConflictExceptions(jobList, troveInfoDict.get))
Example #9
    def _findAcceptablePathConflicts(self, jobList, uJob, troveCache):
        troveInfoNeeded = []
        for job in jobList:
            if trove.troveIsGroup(job[0]) and job[2][0] is not None:
                troveInfoNeeded.append((job[0], ) + job[2])

        troveInfo = troveCache.getTroveInfo(trove._TROVEINFO_TAG_PATHCONFLICTS,
                                            troveInfoNeeded)
        troveInfoDict = dict(
            (tt, ti)
            for (tt, ti) in itertools.izip(troveInfoNeeded, troveInfo))

        uJob.setAllowedPathConflicts(
            self.db.buildPathConflictExceptions(jobList, troveInfoDict.get))
Example #10
 def files(self, auth, t, v, f):
     v = versions.ThawVersion(v)
     f = deps.ThawFlavor(f)
     parentTrove = self.repos.getTrove(t, v, f, withFiles = False)
     # non-source group troves only show contained troves
     if trove.troveIsGroup(t):
         troves = sorted(parentTrove.iterTroveList(strongRefs=True))
         return self._write("group_contents", troveName = t, troves = troves)
     fileIters = []
     for n, v, f in self.repos.walkTroveSet(parentTrove, withFiles = False):
         files = self.repos.iterFilesInTrove(n, v, f,
             withFiles = True,
             sortByPath = True)
         fileIters.append(files)
     return self._write("files",
         troveName = t,
         fileIters = itertools.chain(*fileIters))
Example #11
    def getLatestPackagesOnLabel(self, label, keepComponents=False,
      keepGroups=False):
        client = self._getConaryClient()
        label = self._getLabel(label)
        results = client.getRepos().getTroveLatestByLabel(
            {None: {label: [None]}})

        packages = []
        for name, versiondict in results.iteritems():
            if ':' in name and not keepComponents:
                continue
            elif trove.troveIsGroup(name) and not keepGroups:
                continue

            for version, flavors in versiondict.iteritems():
                for flavor in flavors:
                    packages.append((name, version, flavor))
        return packages
Example #12
 def files(self, auth, t, v, f):
     v = versions.ThawVersion(v)
     f = deps.ThawFlavor(f)
     parentTrove = self.repos.getTrove(t, v, f, withFiles=False)
     # non-source group troves only show contained troves
     if trove.troveIsGroup(t):
         troves = sorted(parentTrove.iterTroveList(strongRefs=True))
         return self._write("group_contents", troveName=t, troves=troves)
     fileIters = []
     for n, v, f in self.repos.walkTroveSet(parentTrove, withFiles=False):
         files = self.repos.iterFilesInTrove(n,
                                             v,
                                             f,
                                             withFiles=True,
                                             sortByPath=True)
         fileIters.append(files)
     return self._write("files",
                        troveName=t,
                        fileIters=itertools.chain(*fileIters))
Example #13
    def __init__(self, troveName, version=None):
        """
        Initializes a TroveMissing exception.

        @param troveName: trove which could not be found
        @type troveName: str
        @param version: version of the trove which does not exist
        @type version: versions.Version, VFS string or [versions.Version]
        """
        self.troveName = troveName
        self.version = version
        if trove.troveIsGroup(troveName):
            self.type = 'group'
        elif trove.troveIsFileSet(troveName):
            self.type = 'fileset'
        elif trove.troveIsComponent(troveName):
            self.type = 'component'
        else:
            self.type = 'package'
Example #14
    def __init__(self, troveName, version = None):
        """
        Initializes a TroveMissing exception.

        @param troveName: trove which could not be found
        @type troveName: str
        @param version: version of the trove which does not exist
        @type version: versions.Version, VFS string or [versions.Version]
        """
        self.troveName = troveName
        self.version = version
        if trove.troveIsGroup(troveName):
            self.type = 'group'
        elif trove.troveIsFileSet(troveName):
            self.type = 'fileset'
        elif trove.troveIsComponent(troveName):
            self.type = 'component'
        else:
            self.type = 'package'
Example #15
def recurseTrove(sourceRepos, name, version, flavor,
                 callback = ChangesetCallback()):
    global recursedGroups
    assert(trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet(
        [(name, (None, None), (version, flavor), True)],
        withFiles=False, withFileContents=False, recurse=False,
        callback = callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
Example #16
def rdiffCommand(cfg, client, db, diffSpec, **kw):
    troveSpec = cmdline.parseChangeList(diffSpec)[0]

    if troveSpec[1][0] is None:
        # Most likely, syntax did not specify <old>--<new>
        return -1

    kw.setdefault('recurse', None)
    kw.setdefault('asDiff', False)

    if kw['recurse'] is None:
        kw['recurse'] = (trove.troveIsCollection(troveSpec[0][0]) and
                         not trove.troveIsGroup(troveSpec[0][0]))

    primaryCsList = cscmd.computeTroveList(client, [ troveSpec ])
    if (primaryCsList[0][1] == primaryCsList[0][2]):
        # Diffing against ourselves
        print "Identical troves"
        return 1

    cs = client.createChangeSet(primaryCsList, withFiles=True,
                                withFileContents=kw['asDiff'],
                                recurse=kw['recurse'])
    showchangeset.displayChangeSet(db, cs, None, cfg, **kw)
Example #17
    def mineLabel(self, labelText, jiraProject):

        print 'Looking at %s product...' % jiraProject

        sourceMap = {}
        sourceOwner = {}
        label = versions.Label(labelText)

        repoPkgs = frozenset([
            x for x in self.repos.troveNames(label)
            if ':' not in x and not (x.startswith('cross-') or x.startswith(
                'bootstrap-') or trove.troveIsGroup(x))
        ])

        cu = self.db.cursor()
        cu.execute(
            """SELECT component.cname
                      FROM component, project
                      WHERE component.project = project.id
                        AND project.pname = %s""", jiraProject)
        jiraPkgs = frozenset([r[0] for r in cu.fetchall()])

        newPkgs = sorted(list(repoPkgs - jiraPkgs))

        troveVersions = self.repos.getTroveLeavesByLabel(
            dict.fromkeys(newPkgs, {label: None}))

        for troveName in newPkgs:
            self.log('checking binary package ' + troveName)
            # need latest version
            troveVersion = sorted(troveVersions[troveName].keys())[-1]
            # we only need one flavor, any flavor, to get the sourceName
            troveFlavor = troveVersions[troveName][troveVersion][0]
            trove = self.repos.getTrove(troveName,
                                        troveVersion,
                                        troveFlavor,
                                        withFiles=False)
            if trove.isRedirect():
                # We do not want to modify jira automatically when we
                # see a redirect, because the redirect may not apply to
                # all versions, and we might really want to keep existing
                # versions the same.
                self.log(' ...ignoring redirected trove ' + troveName)
                continue

            sourceName = trove.getSourceName()
            if not sourceName:
                # old package from before troveinfo
                continue
            sourceNick = sourceName.split(':')[0]
            if sourceNick in jiraPkgs:
                # database doesn't like double-adds
                self.log(' ...source trove %s already in jira' % sourceNick)
                continue
            if sourceNick in sourceMap:
                sourceMap[sourceNick][trove.getName()] = True
                # only investigate each source trove once
                self.log(' ...already checked source trove ' + sourceNick)
                continue
            sourceMap[sourceNick] = {trove.getName(): True}

            sourceVerList = self.repos.getTroveVersionsByLabel(
                {sourceName: {
                    label: None
                }})
            sourceVerList = sorted(sourceVerList[sourceName].keys())
            l = []
            for sourceVer in sourceVerList:
                l.extend(((sourceName, sourceVer, deps.Flavor()), ))
            sourceTroves = self.repos.getTroves(l)

            personMap = {}
            firstPerson = None
            for sourceTrove in sourceTroves:
                cl = sourceTrove.getChangeLog()
                person = self.getPerson(cl.getName(), labelText)
                if not firstPerson:
                    firstPerson = person
                if person in personMap:
                    personMap[person] += 1
                else:
                    personMap[person] = 1
            if firstPerson:
                # original committer is more likely to be the responsible party
                personMap[firstPerson] += 3

            candidate = sorted(personMap.items(), key=lambda x: x[1])[-1][0]
            if not candidate:
                print "No best owner recognized for %s" % sourceNick
                continue
            sourceOwner[sourceNick] = candidate
            print " Best owner for source %s is %s" % (sourceNick,
                                                       sourceOwner[sourceNick])

        self.sourceMap[jiraProject] = sourceMap
        self.sourceOwner[jiraProject] = sourceOwner
Example #18
def buildJobList(src, target, groupList, absolute = False):
    # Match each trove with something we already have; this is to mirror
    # using relative changesets, which is a lot more efficient than using
    # absolute ones.
    q = {}
    srcAvailable = {}
    for group in groupList:
        for mark, (name, version, flavor) in group:
            # force groups to always be transferred using absolute changesets
            if trove.troveIsGroup(name):
                continue
            srcAvailable[(name,version,flavor)] = True
            d = q.setdefault(name, {})
            l = d.setdefault(version.branch(), [])
            l.append(flavor)

    # check that the latest available versions from the target are
    # present on the source to be able to use relative changesets
    latestAvailable = {}
    if len(q):
        latestAvailable = target.getTroveLeavesByBranch(q)
    if len(latestAvailable):
        def _tol(d):
            for n, vd in d.iteritems():
                for v, fl in vd.iteritems():
                    for f in fl:
                        yield (n,v,f)
        ret = src.hasTroves(list(_tol(latestAvailable)), hidden=True)
        srcAvailable.update(ret)

    # we'll keep latestAvailable in sync with what the target will look like
    # as the mirror progresses
    jobList = []
    for group in groupList:
        groupJobList = []
        # for each job find what it's relative to and build up groupJobList
        # as the job list for this group
        for mark, (name, version, flavor) in group:
            # name, version, versionDistance, flavorScore
            currentMatch = (None, None, None, None)
            if absolute or name not in latestAvailable:
                job = (name, (None, None), (version, flavor), True)
            else:
                d = latestAvailable[name]
                for repVersion, flavorList in d.iteritems():
                    # the versions have to be on the same host to be
                    # able to generate relative changesets
                    if version.getHost() != repVersion.getHost():
                        continue
                    for repFlavor in flavorList:
                        if not srcAvailable.get((name, repVersion, repFlavor), False):
                            continue
                        score = flavor.score(repFlavor)
                        if score is False:
                            continue
                        if repVersion == version:
                            closeness = 100000
                        else:
                            closeness = version.closeness(repVersion)
                        if score < currentMatch[3]:
                            continue
                        elif score > currentMatch[3]:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                        elif closeness < currentMatch[2]:
                            continue
                        else:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)

                job = (name, (currentMatch[0], currentMatch[1]),
                              (version, flavor), currentMatch[0] is None)

            groupJobList.append((mark, job))

        # now iterate through groupJobList and update latestAvailable to
        # reflect the state of the mirror after this job completes
        for mark, job in groupJobList:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]

            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})

            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                d[oldVersion].remove(oldFlavor)
                if not d[oldVersion]: del d[oldVersion]

            flavorList = d.setdefault(newVersion, [])
            flavorList.append(newFlavor)

        jobList.append(groupJobList)

    return jobList
Example #19
def expandJobList(db, chgSetList, recurse):
    """
    For each job in the list, find the set of troves which are recursively
    included in it. The return value is a list parallel to chgSetList, each
    item of which is a sorted list of those troves which are included in the
    recursive changeset.
    """
    # We mark old groups (ones without weak references) as uncachable
    # because they're expensive to flatten (and so old that it
    # hardly matters).

    if not recurse:
        return [ [ job ] for job in chgSetList ]

    cu = db.cursor()
    schema.resetTable(cu, "tmpNVF")

    foundGroups = set()
    foundWeak = set()
    foundCollections = set()

    insertList = []

    for jobId, job in enumerate(chgSetList):
        if trove.troveIsGroup(job[0]):
            foundGroups.add(jobId)

        insertList.append((jobId, job[0], job[2][0], job[2][1]))

    db.bulkload("tmpNvf", insertList,
                     [ "idx", "name", "version", "flavor" ],
                     start_transaction = False)

    db.analyze("tmpNVF")

    newJobList = [ [ job ] for job in chgSetList ]

    cu.execute("""SELECT
            tmpNVF.idx, I_Items.item, I_Versions.version,
            I_Flavors.flavor, TroveTroves.flags
        FROM tmpNVF JOIN Items ON tmpNVF.name = Items.item
        JOIN Versions ON (tmpNVF.version = Versions.version)
        JOIN Flavors ON (tmpNVF.flavor = Flavors.flavor)
        JOIN Instances ON
            Items.itemId = Instances.itemId AND
            Versions.versionId = Instances.versionId AND
            Flavors.flavorId = Instances.flavorId
        JOIN TroveTroves USING (instanceId)
        JOIN Instances AS I_Instances ON
            TroveTroves.includedId = I_Instances.instanceId
        JOIN Items AS I_Items ON
            I_Instances.itemId = I_Items.itemId
        JOIN Versions AS I_Versions ON
            I_Instances.versionId = I_Versions.versionId
        JOIN Flavors AS I_Flavors ON
            I_Instances.flavorId = I_Flavors.flavorId
        WHERE
            I_Instances.isPresent = 1
        ORDER BY
            I_Items.item, I_Versions.version, I_Flavors.flavor
    """)

    for (idx, name, version, flavor, flags) in cu:
        newJobList[idx].append( (name, (None, None),
                                       (version, flavor), True) )
        if flags & schema.TROVE_TROVES_WEAKREF > 0:
            foundWeak.add(idx)
        if trove.troveIsCollection(name):
            foundCollections.add(idx)

    for idx in ((foundGroups & foundCollections) - foundWeak):
        # groups which contain collections but no weak refs
        # are uncachable
        newJobList[idx] = None

    return newJobList
Example #20
    def augment(self, model, totalSearchSet, finalTroveSet):
        collections = set()
        for op in model.modelOps:
            if isinstance(op, model.SearchOperation):
                continue

            for troveTup in op:
                name = troveTup[0]
                if (isinstance(op, model.OfferTroveOperation) or
                    trove.troveIsComponent(name)):
                    collections.add(name.split(':')[0])
                elif trove.troveIsGroup(name):
                    collections.add(name)

        # this represents the path from "search" lines
        newSearchPath = []
        rebuildTotalSearchSet = False
        # the "total search" searches the current troveset first, then the
        # search path. we only reset this when an operation changed the
        # working troveset in a way which would affect later operations,
        # after searchTroveSet changes

        # finalTroveSet is the current working set of what's been selected
        # so far

        for op in model.modelOps:
            if isinstance(op, model.SearchOperation):
                partialTup = op.item
                if isinstance(partialTup, versions.Label):
                    newSearchTroveSet = troveset.SearchSourceTroveSet(
                            searchsource.NetworkSearchSource(self.repos,
                                                             [ partialTup ],
                                                             self.flavor),
                            graph = self.g)
                    newSearchSet = newSearchTroveSet
                elif partialTup[0] is not None:
                    newSearchSet = self.reposTroveSet.find(partialTup)
                else:
                    assert(0)

                newSearchPath.insert(0, newSearchSet)
                rebuildTotalSearchSet = True
                continue

            searchSpecs = []
            localSpecs = []
            for troveSpec in op:
                if (troveSpec.version is not None and
                                    troveSpec.version[0] == '/'):
                    try:
                        verObj = versions.VersionFromString(troveSpec.version)
                        if verObj.isInLocalNamespace():
                            localSpecs.append(troveSpec)
                            continue

                    except (errors.VersionStringError, errors.ParseError):
                        pass

                searchSpecs.append(troveSpec)

            if isinstance(op, model.EraseTroveOperation):
                eraseMatches = self._splitFind(self.EraseFindAction,
                                               finalTroveSet, searchSpecs, op)

                finalTroveSet = finalTroveSet._action(eraseMatches,
                        ActionClass=self.RemoveAction,
                        index = op.getLocation())
                continue

            if isinstance(op, model.IncludeOperation):
                # we need a complete total search set to pass into the sub
                # ops, since they have their compilation deferred
                rebuildTotalSearchSet = True

            if rebuildTotalSearchSet:
                totalSearchSet = self.SearchPathTroveSet( newSearchPath +
                                                           [ totalSearchSet ],
                                                         graph = self.g)
                newSearchPath = []
                rebuildTotalSearchSet = False

            searchMatches = self._splitFind(self.FindAction, totalSearchSet,
                                            searchSpecs, op)
            localMatches = self._splitFind(self.FindAction, self.dbTroveSet,
                                           localSpecs, op)

            if searchMatches and localMatches:
                matches = searchMatches._action(localMatches,
                                                ActionClass = self.UnionAction,
                                                index = op.getLocation())
            elif searchMatches:
                matches = searchMatches
            else:
                matches = localMatches

            if isinstance(op, model.IncludeOperation):
                assert(not localMatches)
                finalTroveSet = finalTroveSet._action(
                                matches, totalSearchSet,
                                compiler = self,
                                ActionClass = self.IncludeAction,
                                SearchPathClass = self.SearchPathTroveSet)
                totalSearchSet = finalTroveSet.finalSearchSet
                continue
            elif isinstance(op, model.InstallTroveOperation):
                finalTroveSet = finalTroveSet._action(matches,
                                        ActionClass = self.UnionAction,
                                        index = op.getLocation())
            elif isinstance(op, model.PatchTroveOperation):
                finalTroveSet = finalTroveSet._action(matches,
                                        ActionClass = self.PatchAction,
                                        index = op.getLocation())
            elif isinstance(op, model.UpdateTroveOperation):
                finalTroveSet = finalTroveSet._action(matches,
                                        ActionClass = self.UpdateAction,
                                        index = op.getLocation())
            elif isinstance(op, model.OfferTroveOperation):
                finalTroveSet = finalTroveSet._action(matches,
                                        ActionClass = self.OptionalAction,
                                        index = op.getLocation())
            else:
                assert(0)

            newSearchPath.insert(0, matches)

            for troveSpec in op:
                if troveSpec.name in collections:
                    rebuildTotalSearchSet = True
                    break

        if newSearchPath:
            totalSearchSet = self.SearchPathTroveSet( newSearchPath +
                                                       [ totalSearchSet ],
                                                     graph = self.g)

        finalTroveSet.searchPath = totalSearchSet

        return finalTroveSet
Example #21
    def _buildRedirect(self, trvCsDict, sourceFlavor, sourceVersion, rule, target):
        if target[0] is not None:
            redirInfo = _RedirectInfo(target[0], target[1].branch(), rule.targetFlavor)
        else:
            redirInfo = _RemoveRedirect()

        self.redirections.add(rule.sourceName, sourceFlavor, redirInfo)

        # Groups don't include any additional redirections, and
        # neither do items which aren't collections
        if trove.troveIsGroup(rule.sourceName) or not trove.troveIsCollection(rule.sourceName):
            return

        if target[0] is not None:
            targetTrove = self.repos.getTrove(withFiles=False, *target)
            targetComponents = set([x[0].split(":")[1] for x in targetTrove.iterTroveList(strongRefs=True)])
        else:
            targetComponents = set()

        # we can't integrity check here because we got
        # the trove w/o files
        trvCs = trvCsDict[(rule.sourceName, sourceVersion, sourceFlavor)]
        trv = trove.Trove(trvCs)

        # assemble a set of all of the components included
        # in this trove
        currentComponents = set([x[0].split(":")[1] for x in trv.iterTroveList(strongRefs=True)])

        # components shared between the current trove and
        # the target should be redirected to the target
        # components
        for compName in currentComponents & targetComponents:
            sourceCompName = rule.sourceName + ":" + compName
            targetCompName = redirInfo.targetName + ":" + compName
            self.redirections.add(
                sourceCompName,
                sourceFlavor,
                _RedirectInfo(targetCompName, redirInfo.targetBranch, redirInfo.targetFlavor),
            )

        # now get all of the components which have been
        # included in this trove anywhere on the branch; those
        # components need to generate erase redirects
        allVersions = self.repos.getTroveVersionsByBranch({trv.getName(): {trv.getVersion().branch(): None}})
        l = []
        for subVersion, subFlavorList in allVersions[trv.getName()].iteritems():
            l += [(trv.getName(), subVersion, flavor) for flavor in subFlavorList]

        allTroves = self.repos.getTroves(l, withFiles=False)
        allComponents = set()
        for otherTrv in allTroves:
            allComponents.update([x[0].split(":")[1] for x in otherTrv.iterTroveList(strongRefs=True)])

        # components which existed at any point for this
        # trove but don't have a component in the redirect
        # target need to be erased
        for subName in allComponents - targetComponents:
            newName = rule.sourceName + ":" + subName
            self.redirections.add(newName, sourceFlavor, _RemoveRedirect())

        # the package redirect includes references to the
        # component redirects to let the update code know
        # how to redirect the components; this tracks the
        # components of this redirect
        redirInfo.addComponents([rule.sourceName + ":" + x for x in allComponents])
Example #22
 def isGroupName(packageName):
     return trove.troveIsGroup(packageName)
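For context, Conary appears to identify group troves purely by naming convention, so a wrapper like the one above reduces to a simple prefix test. A minimal illustrative stand-in under that assumption (the helper name is hypothetical; the real predicate is conary's trove.troveIsGroup):

# Illustrative sketch only. Assumption: group troves are named with a
# "group-" prefix; Conary's actual check lives in conary.trove.troveIsGroup.
def _isGroupName(packageName):
    return packageName.startswith('group-')

# Expected behaviour under that assumption:
#   _isGroupName('group-os')       -> True
#   _isGroupName('glibc:runtime')  -> False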
Example #23
def buildJobList(src, target, groupList, absolute=False, splitNodes=True,
        jobSize=20):
    # Match each trove with something we already have; this is to mirror
    # using relative changesets, which is a lot more efficient than using
    # absolute ones.
    q = {}
    srcAvailable = {}
    for group in groupList:
        for mark, (name, version, flavor) in group:
            # force groups to always be transferred using absolute changesets
            if trove.troveIsGroup(name):
                continue
            srcAvailable[(name,version,flavor)] = True
            d = q.setdefault(name, {})
            l = d.setdefault(version.branch(), [])
            l.append(flavor)

    # check that the latest available versions from the target are
    # present on the source to be able to use relative changesets
    latestAvailable = {}
    if len(q):
        latestAvailable = target.getTroveLeavesByBranch(q)
        latestAvailable = dict(
                    (name, dict(
                        (version, set(flavors))
                        for (version, flavors) in versions.iteritems()
                    )) for (name, versions) in latestAvailable.iteritems())
    if len(latestAvailable):
        def _tol(d):
            for n, vd in d.iteritems():
                for v, fl in vd.iteritems():
                    for f in fl:
                        yield (n,v,f)
        ret = src.hasTroves(list(_tol(latestAvailable)), hidden=True)
        srcAvailable.update(ret)

    def _split():
        # Stop adding troves to this job and allow its troves to be used for
        # the next job's relative changesets.
        for mark, job in jobList[-1]:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]

            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})

            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                flavorList = d[oldVersion]
                flavorList.discard(oldFlavor)
                if not flavorList:
                    del d[oldVersion]

            flavorList = d.setdefault(newVersion, set())
            flavorList.add(newFlavor)
        if jobList[-1]:
            jobList.append([])

    # we'll keep latestAvailable in sync with what the target will look like
    # as the mirror progresses
    jobList = [[]]
    currentNodes = set()
    currentHost = None
    for group in groupList:
        # for each job find what it's relative to and build up a job list
        thisJob = []
        for mark, (name, version, flavor) in group:
            # name, version, versionDistance, flavorScore
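            # Selection policy: prefer the candidate with the highest flavor
            # score, and break ties by version closeness (an exact version
            # match counts as closeness 100000).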
            currentMatch = (None, None, None, None)
            if absolute or name not in latestAvailable:
                job = (name, (None, None), (version, flavor), True)
            else:
                d = latestAvailable[name]
                for repVersion, flavorList in d.iteritems():
                    # the versions have to be on the same host to be
                    # able to generate relative changesets
                    if version.getHost() != repVersion.getHost():
                        continue
                    for repFlavor in flavorList:
                        if not srcAvailable.get((name, repVersion, repFlavor), False):
                            continue
                        score = flavor.score(repFlavor)
                        if score is False:
                            continue
                        if repVersion == version:
                            closeness = 100000
                        else:
                            closeness = version.closeness(repVersion)
                        if score < currentMatch[3]:
                            continue
                        elif score > currentMatch[3]:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                        elif closeness < currentMatch[2]:
                            continue
                        else:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)

                job = (name, (currentMatch[0], currentMatch[1]),
                              (version, flavor), currentMatch[0] is None)

            thisJob.append((mark, job))

        newNodes = set((x[1][0], x[1][2][0].branch()) for x in thisJob)
        newHosts = set(x[1][2][0].getHost() for x in thisJob)
        assert len(newHosts) == 1
        newHost = list(newHosts)[0]
        if (len(jobList[-1]) >= jobSize
                # Can't commit two versions of the same trove
                or (splitNodes and newNodes & currentNodes)
                # Can't commit troves on different hosts
                or currentHost not in (None, newHost)
                ):
            _split()
            currentNodes = set()
        jobList[-1].extend(thisJob)
        currentNodes.update(newNodes)
        currentHost = newHost

    if not jobList[-1]:
        jobList.pop()
    return jobList
Example #24
    def _walk(self, troveCache, newGroups = True, recurse = False,
              installSetOverrides = {}):
        """
        Return ((name, version, flavor), inInstallSet, explicit) tuples
        for the troves referenced by this TroveSet. inInstallSet is True
        if this trove is included in the installSet (byDefault True) for
        any of the troves which include it. It is considered explicit
        iff it is included directly by this TroveSet.

        @param troveCache: TroveCache to use for iterating trove contents
        @type troveCache: TroveSource
        @param newGroups: Return newly created groups. Version will
        be NewVersion().
        @type newGroups: bool
        @param recurse: Return full recursive closure. When possible, implicit
        includes are used to generate this information.
        @type recurse: bool
        @rtype: ((str, versions.Version, deps.Flavor), isInstall, isExplicit)
        """

        if not recurse:
            result = []
            for (troveTup) in self._getInstallSet():
                inInstallSet = installSetOverrides.get(troveTup, True)
                if (newGroups
                        or not isinstance(troveTup[1], versions.NewVersion)):
                    result.append( (troveTup, inInstallSet, True) )

            for (troveTup) in self._getOptionalSet():
                inInstallSet = installSetOverrides.get(troveTup, False)
                if (newGroups
                        or not isinstance(troveTup[1], versions.NewVersion)):
                    result.append( (troveTup, inInstallSet, True) )

            return result

        if not installSetOverrides and self._walkCache is not None:
            return self._walkCache

        walkResult = []

        usedPackages = set()
        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            if trove.troveIsComponent(troveTuple[0]):
                usedPackages.add(troveTuple[0].split(":")[0])

        collections = list()
        newCollections = list()
        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            if (isinstance(troveTuple[1], versions.NewVersion)):
                newCollections.append(troveTuple)
            elif (trove.troveIsGroup(troveTuple[0]) or
                        troveTuple[0] in usedPackages):
                collections.append(troveTuple)

        troveCache.cacheTroves(collections)

        containedBy = dict ( (x, []) for x in
                           itertools.chain(self.installSet, self.optionalSet))
        containsItems = dict ( (x, False) for x in
                           itertools.chain(self.installSet, self.optionalSet))

        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            for collection in itertools.chain(collections, newCollections):
                if troveCache.troveReferencesTrove(collection, troveTuple):
                    containsItems[collection] = True
                    containedBy[troveTuple].append(collection)

        # for each pair of troves determine the longest path between them; we
        # do this through a simple tree walk
        maxPathLength = {}
        searchList = [ (x, x, 0) for x, y in containsItems.iteritems()
                            if not y ]
        while searchList:
            start, next, depth = searchList.pop(0)

            knownDepth = maxPathLength.get( (start, next), -1 )
            if depth > knownDepth:
                maxPathLength[(start, next)] = depth

            for container in containedBy[next]:
                searchList.append( (start, container, depth + 2) )

        searchList = sorted([ (x, x, 0) for x, y in containsItems.iteritems()
                              if not y ])

        def handle(tt, dp, ii):
            val = results.get(tt)

            if val is None:
                results[tt] = (dp, ii)
            elif val[0] == dp:
                results[tt] = (dp, ii or val[1])
            elif val[0] > dp:
                results[tt] = (dp, ii)

        results = {}
        seenDepths = {}
        while searchList:
            start, troveTup, depth = searchList.pop(0)

            if depth < maxPathLength[(start, troveTup)]:
                continue
            assert(maxPathLength[(start, troveTup)] == depth)

            seenAtDepth = seenDepths.get(troveTup)
            if seenAtDepth is not None and seenAtDepth <= depth:
                # we've walked this at a lower depth; there is no reason
                # to do so again
                continue
            seenDepths[troveTup] = depth

            inInstallSet = installSetOverrides.get(troveTup,
                                                   troveTup in self.installSet)

            handle(troveTup, depth, inInstallSet)

            for child in containedBy[troveTup]:
                searchList.append( (start, child, depth + 2) )

            if not recurse:
                continue

            if inInstallSet or not trove.troveIsPackage(troveTup[0]):
                for subTroveTup, subIsInstall, subIsExplicit in \
                                troveCache.iterTroveListInfo(troveTup):
                    overridenSubIsInstall = installSetOverrides.get(
                            subTroveTup, subIsInstall)
                    handle(subTroveTup, depth + 1,
                           inInstallSet and overridenSubIsInstall)
            else:
                for componentName in troveCache.getPackageComponents(troveTup):
                    handle((componentName, troveTup[1], troveTup[2]),
                           depth + 1, False)

        for (troveTup), (depth, isInstall) in results.iteritems():
            if (newGroups
                    or not isinstance(troveTup[1], versions.NewVersion)):
                walkResult.append(
                        (troveTup, isInstall,
                            (troveTup in self.installSet or
                             troveTup in self.optionalSet) ) )

        if not installSetOverrides:
            self._walkCache = walkResult

        return walkResult
Example #25
    def _buildRedirect(self, trvCsDict, sourceFlavor, sourceVersion, rule,
                       target):
        if target[0] is not None:
            redirInfo = _RedirectInfo(target[0], target[1].branch(),
                                      rule.targetFlavor)
        else:
            redirInfo = _RemoveRedirect()

        self.redirections.add(rule.sourceName, sourceFlavor, redirInfo)

        # Groups don't include any additional redirections, and
        # neither do items which aren't collections
        if (trove.troveIsGroup(rule.sourceName)
                or not trove.troveIsCollection(rule.sourceName)):
            return

        if target[0] is not None:
            targetTrove = self.repos.getTrove(withFiles=False, *target)
            targetComponents = set([
                x[0].split(':')[1]
                for x in targetTrove.iterTroveList(strongRefs=True)
            ])
        else:
            targetComponents = set()

        # we can't integrity check here because we got
        # the trove w/o files
        trvCs = trvCsDict[(rule.sourceName, sourceVersion, sourceFlavor)]
        trv = trove.Trove(trvCs)

        # assemble a set of all of the components included
        # in this trove
        currentComponents = set(
            [x[0].split(':')[1] for x in trv.iterTroveList(strongRefs=True)])

        # components shared between the current trove and
        # the target should be redirected to the target
        # components
        for compName in currentComponents & targetComponents:
            sourceCompName = rule.sourceName + ':' + compName
            targetCompName = redirInfo.targetName + ':' + compName
            self.redirections.add(
                sourceCompName, sourceFlavor,
                _RedirectInfo(targetCompName, redirInfo.targetBranch,
                              redirInfo.targetFlavor))

        # now get all of the components which have been
        # included in this trove anywhere on the branch; those
        # components need to generate erase redirects
        allVersions = self.repos.getTroveVersionsByBranch(
            {trv.getName(): {
                 trv.getVersion().branch(): None
             }})
        l = []
        for subVersion, subFlavorList in \
                allVersions[trv.getName()].iteritems():
            l += [(trv.getName(), subVersion, flavor)
                  for flavor in subFlavorList]

        allTroves = self.repos.getTroves(l, withFiles=False)
        allComponents = set()
        for otherTrv in allTroves:
            allComponents.update([
                x[0].split(':')[1]
                for x in otherTrv.iterTroveList(strongRefs=True)
            ])

        # components which existed at any point for this
        # trove but don't have a component in the redirect
        # target need to be erased
        for subName in allComponents - targetComponents:
            newName = rule.sourceName + ':' + subName
            self.redirections.add(newName, sourceFlavor, _RemoveRedirect())

        # the package redirect includes references to the
        # component redirects to let the update code know
        # how to redirect the components; this tracks the
        # components of this redirect
        redirInfo.addComponents(
            [rule.sourceName + ':' + x for x in allComponents])
Example #26
def buildJobList(src, target, groupList, absolute=False):
    # Match each trove with something we already have; this is to mirror
    # using relative changesets, which is a lot more efficient than using
    # absolute ones.
    q = {}
    srcAvailable = {}
    for group in groupList:
        for mark, (name, version, flavor) in group:
            # force groups to always be transferred using absolute changesets
            if trove.troveIsGroup(name):
                continue
            srcAvailable[(name, version, flavor)] = True
            d = q.setdefault(name, {})
            l = d.setdefault(version.branch(), [])
            l.append(flavor)

    # check that the latest available versions from the target are
    # present on the source to be able to use relative changesets
    latestAvailable = {}
    if len(q):
        latestAvailable = target.getTroveLeavesByBranch(q)
    if len(latestAvailable):

        def _tol(d):
            for n, vd in d.iteritems():
                for v, fl in vd.iteritems():
                    for f in fl:
                        yield (n, v, f)

        ret = src.hasTroves(list(_tol(latestAvailable)), hidden=True)
        srcAvailable.update(ret)

    # we'll keep latestAvailable in sync with what the target will look like
    # as the mirror progresses
    jobList = []
    for group in groupList:
        groupJobList = []
        # for each job find what it's relative to and build up groupJobList
        # as the job list for this group
        for mark, (name, version, flavor) in group:
            # name, version, versionDistance, flavorScore
            currentMatch = (None, None, None, None)
            if absolute or name not in latestAvailable:
                job = (name, (None, None), (version, flavor), True)
            else:
                d = latestAvailable[name]
                for repVersion, flavorList in d.iteritems():
                    # the versions have to be on the same host to be
                    # able to generate relative changesets
                    if version.getHost() != repVersion.getHost():
                        continue
                    for repFlavor in flavorList:
                        if not srcAvailable.get(
                            (name, repVersion, repFlavor), False):
                            continue
                        score = flavor.score(repFlavor)
                        if score is False:
                            continue
                        if repVersion == version:
                            closeness = 100000
                        else:
                            closeness = version.closeness(repVersion)
                        if score < currentMatch[3]:
                            continue
                        elif score > currentMatch[3]:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                        elif closeness < currentMatch[2]:
                            continue
                        else:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)

                job = (name, (currentMatch[0], currentMatch[1]),
                       (version, flavor), currentMatch[0] is None)

            groupJobList.append((mark, job))

        # now iterate through groupJobList and update latestAvailable to
        # reflect the state of the mirror after this job completes
        for mark, job in groupJobList:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]

            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})

            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                d[oldVersion].remove(oldFlavor)
                if not d[oldVersion]: del d[oldVersion]

            flavorList = d.setdefault(newVersion, [])
            flavorList.append(newFlavor)

        jobList.append(groupJobList)

    return jobList
Example #27
    def mineLabel(self, labelText, jiraProject):

        print 'Looking at %s product...' %jiraProject

        sourceMap = {}
        sourceOwner = {}
        label = versions.Label(labelText)

        repoPkgs = frozenset([ x for x in self.repos.troveNames(label) if ':' not in x and not (x.startswith('cross-') or x.startswith('bootstrap-') or trove.troveIsGroup(x)) ])

        cu = self.db.cursor()
        cu.execute("""SELECT component.cname
                      FROM component, project
                      WHERE component.project = project.id
                        AND project.pname = %s""", jiraProject)
        jiraPkgs = frozenset([r[0] for r in cu.fetchall()])

        newPkgs = sorted(list(repoPkgs-jiraPkgs))

        troveVersions = self.repos.getTroveLeavesByLabel(
            dict.fromkeys(newPkgs, {label: None}))

        for troveName in newPkgs:
            self.log('checking binary package ' + troveName)
            # need latest version
            troveVersion = sorted(troveVersions[troveName].keys())[-1]
            # we only need one flavor, any flavor, to get the sourceName
            troveFlavor = troveVersions[troveName][troveVersion][0]
            trove = self.repos.getTrove(troveName, troveVersion, troveFlavor,
                                   withFiles=False)
            if trove.isRedirect():
                # We do not want to modify jira automatically when we
                # see a redirect, because the redirect may not apply to
                # all versions, and we might really want to keep existing
                # versions the same.
                self.log(' ...ignoring redirected trove ' + troveName)
                continue

            sourceName = trove.getSourceName()
            if not sourceName:
                # old package from before troveinfo
                continue
            sourceNick = sourceName.split(':')[0]
            if sourceNick in jiraPkgs:
                # database doesn't like double-adds
                self.log(' ...source trove %s already in jira' %sourceNick)
                continue
            if sourceNick in sourceMap:
                sourceMap[sourceNick][trove.getName()] = True
                # only investigate each source trove once
                self.log(' ...already checked source trove ' + sourceNick)
                continue
            sourceMap[sourceNick] = {trove.getName(): True}

            sourceVerList = self.repos.getTroveVersionsByLabel(
                {sourceName: {label : None} })
            sourceVerList = sorted(sourceVerList[sourceName].keys())
            l = []
            for sourceVer in sourceVerList:
                l.append((sourceName, sourceVer, deps.Flavor()))
            sourceTroves = self.repos.getTroves(l)

            personMap = {}
            firstPerson = None
            for sourceTrove in sourceTroves:
                cl = sourceTrove.getChangeLog()
                person = self.getPerson(cl.getName(), labelText)
                if not firstPerson:
                    firstPerson = person
                if person in personMap:
                    personMap[person] += 1
                else:
                    personMap[person] = 1
            if firstPerson:
                # original committer is more likely to be the responsible party
                personMap[firstPerson] += 3

            candidate = sorted(personMap.items(), key=lambda x: x[1])[-1][0]
            if not candidate:
                print "No best owner recognized for %s" %sourceNick
                continue
            sourceOwner[sourceNick] = candidate
            print " Best owner for source %s is %s" %(
                    sourceNick, sourceOwner[sourceNick])

        self.sourceMap[jiraProject] = sourceMap
        self.sourceOwner[jiraProject] = sourceOwner
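
The owner-selection loop above boils down to a frequency count with a bonus for the original committer. A hedged sketch of that heuristic follows; pick_owner and the names are hypothetical, and the +3 bonus mirrors the weighting used above.

# Illustrative only: pick_owner and the names are made up; the +3 bonus
# mirrors the weighting given to the original committer above.
def pick_owner(committers, first_bonus=3):
    if not committers:
        return None
    scores = {}
    for person in committers:
        scores[person] = scores.get(person, 0) + 1
    scores[committers[0]] += first_bonus    # original committer gets a bonus
    return sorted(scores.items(), key=lambda x: x[1])[-1][0]

print(pick_owner(['alice', 'bob', 'bob', 'carol']))  # -> 'alice' (1 + 3 > 2)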
Beispiel #30
0
def mirrorRepository(
        sourceRepos,
        targetRepos,
        cfg,
        test=False,
        sync=False,
        syncSigs=False,
        callback=ChangesetCallback(),
        fastSync=False,
        referenceRepos=None,
):
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)

    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")

    if sync:
        currentMark = -1
    else:
        marks = [t.getMirrorMark() for t in targets]
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t in targets:
        if t.getMirrorMark() != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    for t in targets:
        t.mirrorGPG(referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                                      cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark:  # something was returned, but filtered out
            for t in targets:
                t.setMirrorMark(newMark)
            return -1  # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1  # only used if we filter out all troves below
    initTLlen = len(troveList)

    # removed troves are a special blend - we keep them separate
    removedSet = set(
        [x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED])
    troveList = [(x[0], x[1]) for x in troveList
                 if x[2] != trove.TROVE_TYPE_REMOVED]

    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos,
                                         name,
                                         version,
                                         flavor,
                                         callback=callback)

                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)

                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set(
            [x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            for t in targets:
                t.mirrorGPG(referenceRepos, host)

    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [targets[i] for i in ts]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList) - 1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        for t in targets:
            t.setMirrorMark(crtMaxMark)
        # try again
        return -1

    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList),
                  len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a, b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList:  # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList),
                  len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList,
                               cfg.absoluteChangesets)
        for i, bundle in enumerate(bundles):
            jobList = [x[1] for x in bundle]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" %
                          (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" %
                      (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList,
                                                tmpName,
                                                recurse=False,
                                                callback=callback,
                                                mirrorMode=True)
            except changeset.ChangeSetKeyConflictError:
                splitJobList(jobList,
                             sourceRepos,
                             targetSet,
                             hidden=hidden,
                             callback=callback)
            else:
                for target in targetSet:
                    target.commitChangeSetFile(tmpName,
                                               hidden=hidden,
                                               callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
            updateCount += len(bundle)
        # compute the max mark of the bundles we committed
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else:  # only when we're all done looping advance mark to the new max
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark  # avoid repeating the same query...
        for target in targets:
            if hidden:  # if we've hidden the last commits, show them now
                target.presentHiddenTroves()
            target.setMirrorMark(bundlesMark)
    # mirroring removed troves requires one by one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos,
                                     target.repo,
                                     copySet,
                                     test=test,
                                     callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        for target in targets:
            target.setMirrorMark(crtMaxMark)
        return -1
    return updateCount
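
The byTrove/byTarget bookkeeping above groups troves by the exact set of targets that still need them, then mirrors the smaller groups first. A simplified sketch, assuming integer target indexes instead of repository objects; split_by_target is illustrative, not Conary API.

# Illustrative only: integer indexes stand in for mirror target objects.
def split_by_target(needed):
    # needed: {troveName: set of target indexes that still need it}
    targetSetList = []
    byTarget = {}
    for trv, ts in sorted(needed.items()):
        targetSet = sorted(ts)
        try:
            targetIdx = targetSetList.index(targetSet)
        except ValueError:
            targetSetList.append(targetSet)
            targetIdx = len(targetSetList) - 1
        byTarget.setdefault(targetIdx, []).append(trv)
    # mirror the chunks needed by the fewest targets first
    order = sorted(byTarget, key=lambda idx: len(targetSetList[idx]))
    return [(targetSetList[idx], byTarget[idx]) for idx in order]

print(split_by_target({'a': {0}, 'b': {0, 1}, 'c': {0}}))
# -> [([0], ['a', 'c']), ([0, 1], ['b'])]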
Beispiel #31
0
    def _walk(self,
              troveCache,
              newGroups=True,
              recurse=False,
              installSetOverrides={}):
        """
        Return ((name, version, flavor), inInstallSet, explicit) tuples
        for the troves referenced by this TroveSet. inInstallSet is True
        if this trove is included in the installSet (byDefault True) for
        any of the troves which include it. It is considered explicit
        iff it is included directly by this TroveSet.

        @param troveCache: TroveCache to use for iterating trove contents
        @type troveCache: TroveSource
        @param newGroups: Return newly created groups. Version will
        be NewVersion().
        @type newGroups: bool
        @param recurse: Return full recursive closure. When possible, implicit
        includes are used to generate this information.
        @type recurse: bool
        @rtype: ((str, versions.Version, deps.Flavor), isInstall, isExplicit)
        """

        if not recurse:
            result = []
            for (troveTup) in self._getInstallSet():
                inInstallSet = installSetOverrides.get(troveTup, True)
                if (newGroups
                        or not isinstance(troveTup[1], versions.NewVersion)):
                    result.append((troveTup, inInstallSet, True))

            for (troveTup) in self._getOptionalSet():
                inInstallSet = installSetOverrides.get(troveTup, False)
                if (newGroups
                        or not isinstance(troveTup[1], versions.NewVersion)):
                    result.append((troveTup, inInstallSet, True))

            return result

        if not installSetOverrides and self._walkCache is not None:
            return self._walkCache

        walkResult = []

        usedPackages = set()
        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            if trove.troveIsComponent(troveTuple[0]):
                usedPackages.add(troveTuple[0].split(":")[0])

        collections = list()
        newCollections = list()
        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            if (isinstance(troveTuple[1], versions.NewVersion)):
                newCollections.append(troveTuple)
            elif (trove.troveIsGroup(troveTuple[0])
                  or troveTuple[0] in usedPackages):
                collections.append(troveTuple)

        troveCache.cacheTroves(collections)

        containedBy = dict(
            (x, [])
            for x in itertools.chain(self.installSet, self.optionalSet))
        containsItems = dict(
            (x, False)
            for x in itertools.chain(self.installSet, self.optionalSet))

        for troveTuple in itertools.chain(self.installSet, self.optionalSet):
            for collection in itertools.chain(collections, newCollections):
                if troveCache.troveReferencesTrove(collection, troveTuple):
                    containsItems[collection] = True
                    containedBy[troveTuple].append(collection)

        # for each pair of troves determine the longest path between them; we
        # do this through a simple tree walk
        maxPathLength = {}
        searchList = [(x, x, 0) for x, y in containsItems.iteritems() if not y]
        while searchList:
            start, next, depth = searchList.pop(0)

            knownDepth = maxPathLength.get((start, next), -1)
            if depth > knownDepth:
                maxPathLength[(start, next)] = depth

            for container in containedBy[next]:
                searchList.append((start, container, depth + 2))

        searchList = sorted([(x, x, 0) for x, y in containsItems.iteritems()
                             if not y])

        def handle(tt, dp, ii):
            val = results.get(tt)

            if val is None:
                results[tt] = (dp, ii)
            elif val[0] == dp:
                results[tt] = (dp, ii or val[1])
            elif val[0] > dp:
                results[tt] = (dp, ii)

        results = {}
        seenDepths = {}
        while searchList:
            start, troveTup, depth = searchList.pop(0)

            if depth < maxPathLength[(start, troveTup)]:
                continue
            assert (maxPathLength[(start, troveTup)] == depth)

            seenAtDepth = seenDepths.get(troveTup)
            if seenAtDepth is not None and seenAtDepth <= depth:
                # we've walked this at a lower depth; there is no reason
                # to do so again
                continue
            seenDepths[troveTup] = depth

            inInstallSet = installSetOverrides.get(troveTup, troveTup
                                                   in self.installSet)

            handle(troveTup, depth, inInstallSet)

            for child in containedBy[troveTup]:
                searchList.append((start, child, depth + 2))

            if not recurse:
                continue

            if inInstallSet or not trove.troveIsPackage(troveTup[0]):
                for subTroveTup, subIsInstall, subIsExplicit in \
                                troveCache.iterTroveListInfo(troveTup):
                    overridenSubIsInstall = installSetOverrides.get(
                        subTroveTup, subIsInstall)
                    handle(subTroveTup, depth + 1, inInstallSet
                           and overridenSubIsInstall)
            else:
                for componentName in troveCache.getPackageComponents(troveTup):
                    handle((componentName, troveTup[1], troveTup[2]),
                           depth + 1, False)

        for (troveTup), (depth, isInstall) in results.iteritems():
            if (newGroups or not isinstance(troveTup[1], versions.NewVersion)):
                walkResult.append(
                    (troveTup, isInstall, (troveTup in self.installSet
                                           or troveTup in self.optionalSet)))

        if not installSetOverrides:
            self._walkCache = walkResult

        return walkResult
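
The containedBy/maxPathLength walk above computes, for every trove, how deep its longest containment chain is before deciding its install/optional status. A simplified sketch: depth grows by 1 per level and is tracked per node, whereas the real code steps by 2 and keys the depth on (start, node) pairs; longest_depths is illustrative only.

# Illustrative only: containedBy maps a trove to the collections that
# include it; depth grows by 1 per containment level (the real walk
# steps by 2 and keys the depth on (start, node) pairs).
def longest_depths(containedBy, leaves):
    maxDepth = {}
    searchList = [(leaf, 0) for leaf in leaves]
    while searchList:
        node, depth = searchList.pop(0)
        if depth > maxDepth.get(node, -1):
            maxDepth[node] = depth
        for container in containedBy.get(node, []):
            searchList.append((container, depth + 1))
    return maxDepth

containedBy = {'group-base': ['group-os'], 'foo': ['group-base', 'group-os']}
# 'group-os' ends up at depth 2: its longest chain runs through 'group-base'
# even though it also contains 'foo' directly.
print(longest_depths(containedBy, leaves=['foo']))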
Beispiel #32
0
    def _matchPrebuiltTroves(self, buildTroves, prebuiltTroveList):
        if not prebuiltTroveList:
            return
        trovesByNV = {}
        trovesByLabel = {}
        needsSourceMatch = []
        for trv in buildTroves:
            trovesByNV.setdefault((trv.getName(),
                                   trv.getVersion()), []).append(trv)
            if (trv.getHost() == self.serverCfg.reposName
                and trv.getVersion().branch().hasParentBranch()):
                trovesByLabel.setdefault((trv.getName(),
                          trv.getVersion().branch().parentBranch().label()),
                          []).append(trv)
                needsSourceMatch.append(trv)
            else:
                trovesByLabel.setdefault((trv.getName(),
                                          trv.getLabel()), []).append(trv)

        needed = {}
        for n,v,f in prebuiltTroveList:
            if trv_mod.troveIsGroup(n):
                continue
            matchingTroves = trovesByNV.get((n + ':source',
                                                v.getSourceVersion()), False)
            if not matchingTroves:
                matchingTroves = trovesByLabel.get((n + ':source',
                                                    v.trailingLabel()), False)
            if matchingTroves:
                strongF = f.toStrongFlavor()
                maxScore = -999
                for matchingTrove in matchingTroves:
                    matchingFlavor = matchingTrove.getFlavor()
                    score = matchingFlavor.toStrongFlavor().score(strongF)
                    if score is False:
                        continue
                    if score > maxScore:
                        # remember the best score so the closest flavor match wins
                        maxScore = score
                        needed[n,v,f] = matchingTrove
        if not needed:
            return
        allBinaries = {}
        troveDict = {}
        for neededTup, matchingTrove in needed.iteritems():
            otherPackages = [ (x, neededTup[1], neededTup[2])
                               for x in matchingTrove.getDerivedPackages() 
                            ]
            hasTroves = self.repos.hasTroves(otherPackages)
            if isinstance(hasTroves, dict):
                hasTroves = [ hasTroves[x] for x in otherPackages ]
            otherPackages = [ x[1] for x
                              in zip(hasTroves, otherPackages) if x[0]]
            binaries = otherPackages
            otherPackages = self.repos.getTroves(otherPackages)
            troveDict.update((x.getNameVersionFlavor(), x) for x in otherPackages)
            binaries.extend(
                itertools.chain(*[x.iterTroveList(strongRefs=True) for x in otherPackages ]))
            allBinaries[neededTup] = binaries

        for troveTup, buildTrove in needed.iteritems():
            oldTrove = troveDict[troveTup]
            if buildTrove in needsSourceMatch:
                sourceVersion = oldTrove.getVersion().getSourceVersion()
                if buildTrove.getVersion().isUnmodifiedShadow():
                    sourceMatches = (buildTrove.getVersion().parentVersion()
                                     == sourceVersion)
                else:
                    sourceTrv = self.repos.getTrove(
                                   troveTup[0].split(':')[0] + ':source',
                                   sourceVersion, deps.Flavor())
                    clonedFromVer = sourceTrv.troveInfo.clonedFrom()
                    sourceMatches = (clonedFromVer == buildTrove.getVersion())
            else:
                sourceMatches = (troveTup[1].getSourceVersion()
                                 == buildTrove.getVersion())

            self._matchPrebuiltTrove(oldTrove, buildTrove,
                                 allBinaries[oldTrove.getNameVersionFlavor()],
                                 sourceMatches=sourceMatches)
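
The flavor-matching loop above is a pick-the-best-score selection. A hedged stand-in follows, with integer scores in place of Conary flavor scoring; best_match and the candidate strings are hypothetical, and the running maximum is what lets the closest match win.

# Illustrative only: integer scores stand in for Conary flavor scoring,
# and the candidate names are made up.
def best_match(candidates, score_fn):
    best, maxScore = None, -999
    for cand in candidates:
        score = score_fn(cand)
        if score is False:      # incompatible flavor
            continue
        if score > maxScore:    # keep the closest-scoring match
            best, maxScore = cand, score
    return best

scores = {'foo[is: x86]': 2, 'foo[is: x86_64]': False, 'foo[]': 0}
print(best_match(sorted(scores), scores.get))  # -> 'foo[is: x86]'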
Beispiel #33
0
def mirrorRepository(sourceRepos, targetRepos, cfg,
                     test = False, sync = False, syncSigs = False,
                     callback = ChangesetCallback(),
                     fastSync = False,
                     referenceRepos=None,
                     ):
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)

    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")

    if sync:
        currentMark = -1
    else:
        marks = [ t.getMirrorMark() for t in targets ]
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t in targets:
        if t.getMirrorMark() != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    for t in targets:
        t.mirrorGPG(referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark: # something was returned, but filtered out
            for t in targets:
                t.setMirrorMark(newMark)
            return -1 # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1 # only used if we filter out all troves below
    initTLlen = len(troveList)

    # removed troves are a special blend - we keep them separate
    removedSet  = set([ x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED ])
    troveList = [ (x[0], x[1]) for x in troveList if x[2] != trove.TROVE_TYPE_REMOVED ]

    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos, name,
                        version, flavor, callback=callback)

                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)

                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set([x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            for t in targets:
                t.mirrorGPG(referenceRepos, host)

    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [ targets[i] for i in ts ]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList)-1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        for t in targets:
            t.setMirrorMark(crtMaxMark)
        # try again
        return -1

    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList), len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a,b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList: # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList), len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList, cfg.absoluteChangesets)
        for i, bundle in enumerate(bundles):
            jobList = [ x[1] for x in bundle ]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" % (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" % (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList, tmpName, recurse = False,
                                                callback = callback, mirrorMode = True)
            except changeset.ChangeSetKeyConflictError:
                splitJobList(jobList, sourceRepos, targetSet, hidden=hidden,
                             callback=callback)
            else:
                for target in targetSet:
                    target.commitChangeSetFile(tmpName, hidden=hidden, callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
            updateCount += len(bundle)
        # compute the max mark of the bundles we committed
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else: # only when we're all done looping advance mark to the new max
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark # avoid repeating the same query...
        for target in targets:
            if hidden: # if we've hidden the last commits, show them now
                target.presentHiddenTroves()
            target.setMirrorMark(bundlesMark)
    # mirroring removed troves requires one by one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos, target.repo, copySet,
                                     test=test, callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        for target in targets:
            target.setMirrorMark(crtMaxMark)
        return -1
    return updateCount
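
The end of the outer loop above uses Python's for/else: since the loop body never breaks, the else clause runs once after all target sets are processed and decides whether to fall back to crtMaxMark. A tiny illustration of that pattern; advance_mark is not Conary code.

# Illustrative only: the else clause of a for loop runs when the loop
# finishes without hitting a break (here there is no break, so it always
# runs once, after every bundle mark has been seen).
def advance_mark(bundle_marks, current_mark, crt_max_mark):
    bundles_mark = 0
    for mark in bundle_marks:
        if mark > bundles_mark:
            bundles_mark = mark
    else:
        if bundles_mark == 0 or bundles_mark <= current_mark:
            bundles_mark = crt_max_mark  # avoid repeating the same query
    return bundles_mark

print(advance_mark([], current_mark=10, crt_max_mark=25))        # -> 25
print(advance_mark([12, 18], current_mark=10, crt_max_mark=25))  # -> 18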
Beispiel #34
0
    def augment(self, model, totalSearchSet, finalTroveSet):
        collections = set()
        for op in model.modelOps:
            if isinstance(op, model.SearchOperation):
                continue

            for troveTup in op:
                name = troveTup[0]
                if (isinstance(op, model.OfferTroveOperation)
                        or trove.troveIsComponent(name)):
                    collections.add(name.split(':')[0])
                elif trove.troveIsGroup(name):
                    collections.add(name)

        # this represents the path from "search" lines
        newSearchPath = []
        rebuildTotalSearchSet = False
        # the "total search" searches the current troveset first, then the
        # search path. we only reset this when an operation changed the
        # working troveset in a way which would affect later operations,
        # after searchTroveSet chagnes
        # changed the current troveset in a way which a

        # finalTroveSet is the current working set of what's been selected
        # so far

        for op in model.modelOps:
            if isinstance(op, model.SearchOperation):
                partialTup = op.item
                if isinstance(partialTup, versions.Label):
                    newSearchTroveSet = troveset.SearchSourceTroveSet(
                        searchsource.NetworkSearchSource(
                            self.repos, [partialTup], self.flavor),
                        graph=self.g)
                    newSearchSet = newSearchTroveSet
                elif partialTup[0] is not None:
                    newSearchSet = self.reposTroveSet.find(partialTup)
                else:
                    assert (0)

                newSearchPath.insert(0, newSearchSet)
                rebuildTotalSearchSet = True
                continue

            searchSpecs = []
            localSpecs = []
            for troveSpec in op:
                if (troveSpec.version is not None
                        and troveSpec.version[0] == '/'):
                    try:
                        verObj = versions.VersionFromString(troveSpec.version)
                        if verObj.isInLocalNamespace():
                            localSpecs.append(troveSpec)
                            continue

                    except (errors.VersionStringError, errors.ParseError):
                        pass

                searchSpecs.append(troveSpec)

            if isinstance(op, model.EraseTroveOperation):
                eraseMatches = self._splitFind(self.EraseFindAction,
                                               finalTroveSet, searchSpecs, op)

                finalTroveSet = finalTroveSet._action(
                    eraseMatches,
                    ActionClass=self.RemoveAction,
                    index=op.getLocation())
                continue

            if isinstance(op, model.IncludeOperation):
                # we need a complete total search set to pass into the sub
                # ops, since they have their compilation deferred
                rebuildTotalSearchSet = True

            if rebuildTotalSearchSet:
                totalSearchSet = self.SearchPathTroveSet(newSearchPath +
                                                         [totalSearchSet],
                                                         graph=self.g)
                newSearchPath = []
                rebuildTotalSearchSet = False

            searchMatches = self._splitFind(self.FindAction, totalSearchSet,
                                            searchSpecs, op)
            localMatches = self._splitFind(self.FindAction, self.dbTroveSet,
                                           localSpecs, op)

            if searchMatches and localMatches:
                matches = searchMatches._action(localMatches,
                                                ActionClass=self.UnionAction,
                                                index=op.getLocation())
            elif searchMatches:
                matches = searchMatches
            else:
                matches = localMatches

            if isinstance(op, model.IncludeOperation):
                assert (not localMatches)
                finalTroveSet = finalTroveSet._action(
                    matches,
                    totalSearchSet,
                    compiler=self,
                    ActionClass=self.IncludeAction,
                    SearchPathClass=self.SearchPathTroveSet)
                totalSearchSet = finalTroveSet.finalSearchSet
                continue
            elif isinstance(op, model.InstallTroveOperation):
                finalTroveSet = finalTroveSet._action(
                    matches,
                    ActionClass=self.UnionAction,
                    index=op.getLocation())
            elif isinstance(op, model.PatchTroveOperation):
                finalTroveSet = finalTroveSet._action(
                    matches,
                    ActionClass=self.PatchAction,
                    index=op.getLocation())
            elif isinstance(op, model.UpdateTroveOperation):
                finalTroveSet = finalTroveSet._action(
                    matches,
                    ActionClass=self.UpdateAction,
                    index=op.getLocation())
            elif isinstance(op, model.OfferTroveOperation):
                finalTroveSet = finalTroveSet._action(
                    matches,
                    ActionClass=self.OptionalAction,
                    index=op.getLocation())
            else:
                assert (0)

            newSearchPath.insert(0, matches)

            for troveSpec in op:
                if troveSpec.name in collections:
                    rebuildTotalSearchSet = True
                    break

        if newSearchPath:
            totalSearchSet = self.SearchPathTroveSet(newSearchPath +
                                                     [totalSearchSet],
                                                     graph=self.g)

        finalTroveSet.searchPath = totalSearchSet

        return finalTroveSet
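
The first pass of augment() collects the names that behave as collections: components contribute their package name, and groups are kept as-is. A sketch with stand-in predicates; the real code calls trove.troveIsComponent and trove.troveIsGroup.

# Illustrative only: stand-in predicates; the real code calls
# trove.troveIsComponent and trove.troveIsGroup.
def is_component(name):
    return ':' in name

def is_group(name):
    return name.startswith('group-')

def gather_collections(troveNames):
    collections = set()
    for name in troveNames:
        if is_component(name):
            collections.add(name.split(':')[0])  # component -> its package
        elif is_group(name):
            collections.add(name)                # groups are collections
    return collections

print(sorted(gather_collections(['foo:runtime', 'group-os', 'bar'])))
# -> ['foo', 'group-os']  ('bar' is a plain package, not a collection)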
Beispiel #35
0
    def isGroupName(packageName):
        return trove.troveIsGroup(packageName)
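
For reference, Conary group troves follow a "group-" naming convention, which is roughly what troveIsGroup tests. A hedged stand-in check, for illustration only; looksLikeGroupName is not the Conary implementation.

# Hedged stand-in, for illustration only: group troves are named with a
# "group-" prefix, which is roughly what trove.troveIsGroup tests.
def looksLikeGroupName(packageName):
    return packageName.startswith('group-') and ':' not in packageName

for name in ('group-os', 'foo', 'foo:runtime'):
    print('%s -> %s' % (name, looksLikeGroupName(name)))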