示例#1
0
    def packagesetsForSource(self,
                             archive,
                             sourcepackagename,
                             direct_permissions=True):
        """See `IArchivePermissionSet`."""
        # Resolve the argument via the helper (presumably accepts either a
        # name string or a SourcePackageName — confirm against helper).
        sourcepackagename = self._nameToSourcePackageName(sourcepackagename)
        store = IStore(ArchivePermission)

        # With direct_permissions, only package sets containing the source
        # directly are considered; otherwise the flattened inclusion table
        # is joined in so permissions on including (parent) sets apply too.
        if direct_permissions:
            from_tables = 'ArchivePermission, PackagesetSources'
            where_clause = '''
                ArchivePermission.packageset = PackagesetSources.packageset
                AND PackagesetSources.sourcepackagename = ?
                AND ArchivePermission.archive = ?
                '''
        else:
            from_tables = ('ArchivePermission, PackagesetSources, '
                           'FlatPackagesetInclusion')
            where_clause = '''
                ArchivePermission.packageset = FlatPackagesetInclusion.parent
                AND PackagesetSources.packageset =
                    FlatPackagesetInclusion.child
                AND PackagesetSources.sourcepackagename = ?
                AND ArchivePermission.archive = ?
                '''
        return store.using(SQL(from_tables)).find(
            ArchivePermission,
            SQL(where_clause, (sourcepackagename.id, archive.id)))
    def packagesetsForSource(
        self, archive, sourcepackagename, direct_permissions=True):
        """See `IArchivePermissionSet`.

        Return the `ArchivePermission` rows that cover `sourcepackagename`
        in `archive` via package sets.
        """
        # Resolve the argument via the helper (presumably accepts either a
        # name string or a SourcePackageName — confirm against helper).
        sourcepackagename = self._nameToSourcePackageName(sourcepackagename)
        store = IStore(ArchivePermission)

        if direct_permissions:
            # Only package sets that contain the source directly.
            origin = SQL('ArchivePermission, PackagesetSources')
            rset = store.using(origin).find(ArchivePermission, SQL('''
                ArchivePermission.packageset = PackagesetSources.packageset
                AND PackagesetSources.sourcepackagename = ?
                AND ArchivePermission.archive = ?
                ''', (sourcepackagename.id, archive.id)))
        else:
            # Join through the flattened inclusion table so permissions on
            # an including (parent) package set apply as well.
            origin = SQL(
                'ArchivePermission, PackagesetSources, '
                'FlatPackagesetInclusion')
            rset = store.using(origin).find(ArchivePermission, SQL('''
                ArchivePermission.packageset = FlatPackagesetInclusion.parent
                AND PackagesetSources.packageset =
                    FlatPackagesetInclusion.child
                AND PackagesetSources.sourcepackagename = ?
                AND ArchivePermission.archive = ?
                ''', (sourcepackagename.id, archive.id)))
        return rset
示例#3
0
 def getSenderAddresses(self, team_names):
     """See `IMailingListSet`.

     Returns a dict mapping each team name to a list of
     (display name, lowercased email) tuples for everyone allowed to
     post to that team's mailing list.
     """
     store = IStore(MailingList)
     # First, we need to find all the members of all the mailing lists for
     # the given teams.  Find all of their validated and preferred email
     # addresses of those team members.  Every one of those email addresses
     # are allowed to post to the mailing list.
     # Alias Person so the team row and the member's Person row can both
     # appear in the same join.
     Team = ClassAlias(Person)
     tables = (
         Person,
         Join(Account, Account.id == Person.accountID),
         Join(EmailAddress, EmailAddress.personID == Person.id),
         Join(TeamParticipation, TeamParticipation.personID == Person.id),
         Join(MailingList, MailingList.teamID == TeamParticipation.teamID),
         Join(Team, Team.id == MailingList.teamID),
         )
     team_ids, list_ids = self._getTeamIdsAndMailingListIds(team_names)
     team_members = store.using(*tables).find(
         (Team.name, Person.displayname, EmailAddress.email),
         And(TeamParticipation.teamID.is_in(team_ids),
             MailingList.status != MailingListStatus.INACTIVE,
             # NOTE(review): teamowner == None appears to restrict the
             # members to individual people rather than nested teams —
             # confirm.  (Storm requires "== None", not "is None".)
             Person.teamowner == None,
             EmailAddress.status.is_in(EMAIL_ADDRESS_STATUSES),
             Account.status == AccountStatus.ACTIVE,
             ))
     # Second, find all of the email addresses for all of the people who
     # have been explicitly approved for posting to the team mailing lists.
     # This occurs as part of first post moderation, but since they've
     # already been approved for the specific list, we don't need to wait
     # for three global approvals.
     tables = (
         Person,
         Join(Account, Account.id == Person.accountID),
         Join(EmailAddress, EmailAddress.personID == Person.id),
         Join(MessageApproval, MessageApproval.posted_byID == Person.id),
         Join(MailingList,
                  MailingList.id == MessageApproval.mailing_listID),
         Join(Team, Team.id == MailingList.teamID),
         )
     approved_posters = store.using(*tables).find(
         (Team.name, Person.displayname, EmailAddress.email),
         And(MessageApproval.mailing_listID.is_in(list_ids),
             MessageApproval.status.is_in(MESSAGE_APPROVAL_STATUSES),
             EmailAddress.status.is_in(EMAIL_ADDRESS_STATUSES),
             Account.status == AccountStatus.ACTIVE,
             ))
     # Sort allowed posters by team/mailing list.
     by_team = collections.defaultdict(set)
     # union() produces a SQL UNION, so rows appearing in both result
     # sets are collapsed.
     all_posters = team_members.union(approved_posters)
     for team_name, person_displayname, email in all_posters:
         assert team_name in team_names, (
             'Unexpected team name in results: %s' % team_name)
         value = (person_displayname, email.lower())
         by_team[team_name].add(value)
     # Turn the results into a mapping of lists.
     results = {}
     for team_name, address_set in by_team.items():
         results[team_name] = list(address_set)
     return results
示例#4
0
    def findFromMany(self, object_ids, types=None):
        """Find all XRefs originating from any of the given objects.

        :param object_ids: An iterable of (type, id) pairs identifying the
            origin objects.
        :param types: An optional iterable of target types; when given,
            only XRefs pointing at those types are returned.
        :return: A dict mapping each (from_type, from_id) pair to a dict of
            (to_type, to_id) -> {"creator", "date_created", "metadata"}.
        """
        from operator import itemgetter

        from lp.registry.model.person import Person

        object_ids = list(object_ids)
        if not object_ids:
            # Nothing requested; avoid issuing an empty OR query.
            return {}

        store = IStore(XRef)
        # Group the requested IDs by type so each type contributes one
        # IN clause instead of one condition per object.  (itemgetter
        # replaces a lambda whose parameter shadowed the builtin `id`.)
        extract_type = itemgetter(0)
        rows = list(
            store.using(XRef).find(
                (XRef.from_type, XRef.from_id, XRef.to_type, XRef.to_id,
                 XRef.creator_id, XRef.date_created, XRef.metadata),
                Or(*[
                    And(XRef.from_type == from_type,
                        XRef.from_id.is_in(
                            [obj_id for _, obj_id in group]))
                    # groupby requires its input sorted by the same key.
                    for from_type, group in groupby(
                        sorted(object_ids, key=extract_type), extract_type)
                ]),
                XRef.to_type.is_in(types) if types is not None else True))
        # Bulk-load all creators up front so the store.get() calls below
        # are cache hits rather than one query per row.
        bulk.load(Person, [row[4] for row in rows])
        result = {}
        for row in rows:
            result.setdefault((row[0], row[1]), {})[(row[2], row[3])] = {
                "creator": store.get(Person, row[4]) if row[4] else None,
                "date_created": row[5],
                "metadata": row[6]
            }
        return result
 def getNotificationsToSend(self):
     """See IBugNotificationSet."""
     # We preload the bug activity and the message in order to
     # try to reduce subsequent database calls: try to get direct
     # dependencies at once.  We then also pre-load the pertinent bugs,
     # people (with their own dependencies), and message chunks before
     # returning the notifications that should be processed.
     # Sidestep circular reference.
     from lp.bugs.model.bug import Bug
     store = IStore(BugNotification)
     source = store.using(BugNotification,
                          Join(Message,
                               BugNotification.message == Message.id),
                          LeftJoin(
                             BugActivity,
                             BugNotification.activity == BugActivity.id))
     # Ordered by bug, then newest notification first within each bug;
     # the reverse() at the end restores oldest-first for sending.
     results = list(source.find(
         (BugNotification, BugActivity, Message),
         BugNotification.status == BugNotificationStatus.PENDING,
         BugNotification.date_emailed == None).order_by(
         'BugNotification.bug', '-BugNotification.id'))
     interval = timedelta(
         minutes=int(config.malone.bugnotification_interval))
     time_limit = (
         datetime.now(pytz.UTC) - interval)
     last_omitted_notification = None
     pending_notifications = []
     people_ids = set()
     bug_ids = set()
     for notification, ignore, ignore in results:
         # A notification newer than the cutoff is still inside the
         # batching window, so it is held back (omitted) for now.
         if notification.message.datecreated > time_limit:
             last_omitted_notification = notification
         # An older notification is also held back when it chains onto an
         # already-omitted newer one: same author, same bug, and created
         # within `interval` of it — the run will be sent together later.
         elif (last_omitted_notification is not None and
             notification.message.ownerID ==
                last_omitted_notification.message.ownerID and
             notification.bugID == last_omitted_notification.bugID and
             last_omitted_notification.message.datecreated -
             notification.message.datecreated < interval):
             last_omitted_notification = notification
         # Anything not just marked as omitted is ready to send; reset the
         # chain so later notifications are judged afresh.
         if last_omitted_notification != notification:
             last_omitted_notification = None
             pending_notifications.append(notification)
             people_ids.add(notification.message.ownerID)
             bug_ids.add(notification.bugID)
     # Now we do some calls that are purely for caching.
     # Converting these into lists forces the queries to execute.
     if pending_notifications:
         list(
             getUtility(IPersonSet).getPrecachedPersonsFromIDs(
                 list(people_ids),
                 need_validity=True,
                 need_preferred_email=True))
         list(
             IStore(Bug).find(Bug, In(Bug.id, list(bug_ids))))
     pending_notifications.reverse()
     return pending_notifications
示例#6
0
 def _getTeamIdsAndMailingListIds(self, team_names):
     """Return a tuple of team and mailing list Ids for the team names."""
     store = IStore(MailingList)
     # Join each team (a Person row with a non-None teamowner) to its
     # mailing list.  Storm requires "!= None" rather than "is not None".
     origin = (Person, Join(MailingList, MailingList.team == Person.id))
     pairs = set(store.using(*origin).find(
         (Person.id, MailingList.id),
         And(Person.name.is_in(team_names), Person.teamowner != None)))
     team_ids = [team_id for team_id, _ in pairs]
     list_ids = [list_id for _, list_id in pairs]
     return team_ids, list_ids
 def getNotificationsToSend(self):
     """See IBugNotificationSet."""
     # We preload the bug activity and the message in order to
     # try to reduce subsequent database calls: try to get direct
     # dependencies at once.  We then also pre-load the pertinent bugs,
     # people (with their own dependencies), and message chunks before
     # returning the notifications that should be processed.
     # Sidestep circular reference.
     from lp.bugs.model.bug import Bug
     store = IStore(BugNotification)
     source = store.using(
         BugNotification,
         Join(Message, BugNotification.message == Message.id),
         LeftJoin(BugActivity, BugNotification.activity == BugActivity.id))
     # Ordered by bug, then newest notification first within each bug;
     # the reverse() at the end restores oldest-first for sending.
     results = list(
         source.find(
             (BugNotification, BugActivity, Message),
             BugNotification.status == BugNotificationStatus.PENDING,
             BugNotification.date_emailed == None).order_by(
                 'BugNotification.bug', '-BugNotification.id'))
     interval = timedelta(
         minutes=int(config.malone.bugnotification_interval))
     time_limit = (datetime.now(pytz.UTC) - interval)
     last_omitted_notification = None
     pending_notifications = []
     people_ids = set()
     bug_ids = set()
     for notification, ignore, ignore in results:
         # A notification newer than the cutoff is still inside the
         # batching window, so it is held back (omitted) for now.
         if notification.message.datecreated > time_limit:
             last_omitted_notification = notification
         # An older notification is also held back when it chains onto an
         # already-omitted newer one: same author, same bug, and created
         # within `interval` of it — the run will be sent together later.
         elif (last_omitted_notification is not None
               and notification.message.ownerID
               == last_omitted_notification.message.ownerID
               and notification.bugID == last_omitted_notification.bugID
               and last_omitted_notification.message.datecreated -
               notification.message.datecreated < interval):
             last_omitted_notification = notification
         # Anything not just marked as omitted is ready to send; reset the
         # chain so later notifications are judged afresh.
         if last_omitted_notification != notification:
             last_omitted_notification = None
             pending_notifications.append(notification)
             people_ids.add(notification.message.ownerID)
             bug_ids.add(notification.bugID)
     # Now we do some calls that are purely for caching.
     # Converting these into lists forces the queries to execute.
     if pending_notifications:
         list(
             getUtility(IPersonSet).getPrecachedPersonsFromIDs(
                 list(people_ids),
                 need_validity=True,
                 need_preferred_email=True))
         list(IStore(Bug).find(Bug, In(Bug.id, list(bug_ids))))
     pending_notifications.reverse()
     return pending_notifications
示例#8
0
 def _getTeamIdsAndMailingListIds(self, team_names):
     """Return a tuple of team and mailing list Ids for the team names."""
     store = IStore(MailingList)
     # Teams are Person rows with a non-None teamowner; join each one to
     # its mailing list.  (Storm requires "!= None", not "is not None".)
     rows = set(store.using(
         Person,
         Join(MailingList, MailingList.team == Person.id)).find(
             (Person.id, MailingList.id),
             And(Person.name.is_in(team_names),
                 Person.teamowner != None)))
     team_ids = []
     list_ids = []
     for team_id, list_id in rows:
         team_ids.append(team_id)
         list_ids.append(list_id)
     return team_ids, list_ids
示例#9
0
 def _preloadProcessors(self, rows):
     """Populate every builder's cached processor list in bulk."""
     # Fetch all (builder id, processor id) pairs in a single query so we
     # do not run one processors query per builder.
     store = IStore(BuilderProcessor)
     pairs = list(
         store.using(BuilderProcessor, Processor).find(
             (BuilderProcessor.builder_id, BuilderProcessor.processor_id),
             BuilderProcessor.processor_id == Processor.id,
             BuilderProcessor.builder_id.is_in(
                 [builder.id for builder in rows])).order_by(
                     BuilderProcessor.builder_id, Processor.name))
     # Pull the Processor rows into the store cache in one query so the
     # store.get() calls below are cache hits.
     load(Processor, [processor_id for _, processor_id in pairs])
     # Start every builder with an empty cache, then append its
     # processors in (builder, processor name) order.
     for builder in rows:
         get_property_cache(builder)._processors_cache = []
     for builder_id, processor_id in pairs:
         cache = get_property_cache(store.get(Builder, builder_id))
         cache._processors_cache.append(store.get(Processor, processor_id))
示例#10
0
 def checkPillarAccess(self, pillars, information_type, person):
     """See `ISharingService`."""
     # Look up the access policies for this information type on each
     # pillar; with no matching policies nobody can hold a grant.
     policies = getUtility(IAccessPolicySource).find(
         [(pillar, information_type) for pillar in pillars])
     policy_ids = [policy.id for policy in policies]
     if not policy_ids:
         return False
     # Grants may be made to teams, so join through TeamParticipation and
     # accept a grant to any team the person participates in.
     store = IStore(AccessPolicyGrant)
     origin = [
         AccessPolicyGrant,
         Join(TeamParticipation,
              TeamParticipation.teamID == AccessPolicyGrant.grantee_id),
     ]
     grants = store.using(*origin).find(
         AccessPolicyGrant,
         AccessPolicyGrant.policy_id.is_in(policy_ids),
         TeamParticipation.personID == person.id)
     return not grants.is_empty()
 def checkPillarAccess(self, pillars, information_type, person):
     """See `ISharingService`."""
     # Find the access policies for this information type on each pillar.
     policies = getUtility(IAccessPolicySource).find(
         [(pillar, information_type) for pillar in pillars])
     policy_ids = [policy.id for policy in policies]
     if not policy_ids:
         # No policies, hence no possible grants.
         return False
     store = IStore(AccessPolicyGrant)
     # Grants may be made to teams; join through TeamParticipation so a
     # grant to any team the person participates in counts.
     tables = [
         AccessPolicyGrant,
         Join(
             TeamParticipation,
             TeamParticipation.teamID == AccessPolicyGrant.grantee_id),
         ]
     result = store.using(*tables).find(
         AccessPolicyGrant,
         AccessPolicyGrant.policy_id.is_in(policy_ids),
         TeamParticipation.personID == person.id)
     # is_empty() issues an EXISTS-style check instead of fetching rows.
     return not result.is_empty()
示例#12
0
 def getSubscribedAddresses(self, team_names):
     """See `IMailingListSet`.

     Returns a dict mapping each team name to a list of
     (display name, lowercased email) tuples for that team's mailing
     list subscribers.
     """
     store = IStore(MailingList)
     # Alias Person so the team row and the subscriber's Person row can
     # both appear in the same join.
     Team = ClassAlias(Person)
     tables = (
         EmailAddress,
         Join(Person, Person.id == EmailAddress.personID),
         Join(Account, Account.id == Person.accountID),
         Join(TeamParticipation, TeamParticipation.personID == Person.id),
         Join(
             MailingListSubscription,
             MailingListSubscription.personID == Person.id),
         Join(
             MailingList,
             MailingList.id == MailingListSubscription.mailing_listID),
         Join(Team, Team.id == MailingList.teamID),
         )
     team_ids, list_ids = self._getTeamIdsAndMailingListIds(team_names)
     preferred = store.using(*tables).find(
         (EmailAddress.email, Person.displayname, Team.name),
         And(MailingListSubscription.mailing_listID.is_in(list_ids),
             TeamParticipation.teamID.is_in(team_ids),
             MailingList.teamID == TeamParticipation.teamID,
             MailingList.status != MailingListStatus.INACTIVE,
             Account.status == AccountStatus.ACTIVE,
             # A subscription either names an explicit address, or (when
             # email_addressID is None) falls back to the person's
             # PREFERRED address.
             Or(
                 And(MailingListSubscription.email_addressID == None,
                     EmailAddress.status == EmailAddressStatus.PREFERRED),
                 EmailAddress.id ==
                     MailingListSubscription.email_addressID),
             ))
     # Sort by team name.
     by_team = collections.defaultdict(set)
     for email, display_name, team_name in preferred:
         assert team_name in team_names, (
             'Unexpected team name in results: %s' % team_name)
         value = (display_name, email.lower())
         by_team[team_name].add(value)
     # Turn the results into a mapping of lists.
     results = {}
     for team_name, address_set in by_team.items():
         results[team_name] = list(address_set)
     return results
示例#13
0
 def getSubscribedAddresses(self, team_names):
     """See `IMailingListSet`.

     Returns a dict mapping each team name to a list of
     (display name, lowercased email) tuples for that team's mailing
     list subscribers.
     """
     store = IStore(MailingList)
     # Alias Person so the team row and the subscriber's Person row can
     # both appear in the same join.
     Team = ClassAlias(Person)
     tables = (
         EmailAddress,
         Join(Person, Person.id == EmailAddress.personID),
         Join(Account, Account.id == Person.accountID),
         Join(TeamParticipation, TeamParticipation.personID == Person.id),
         Join(MailingListSubscription,
              MailingListSubscription.personID == Person.id),
         Join(MailingList,
              MailingList.id == MailingListSubscription.mailing_listID),
         Join(Team, Team.id == MailingList.teamID),
     )
     team_ids, list_ids = self._getTeamIdsAndMailingListIds(team_names)
     preferred = store.using(*tables).find(
         (EmailAddress.email, Person.display_name, Team.name),
         And(
             MailingListSubscription.mailing_listID.is_in(list_ids),
             TeamParticipation.teamID.is_in(team_ids),
             MailingList.teamID == TeamParticipation.teamID,
             MailingList.status != MailingListStatus.INACTIVE,
             Account.status == AccountStatus.ACTIVE,
             # A subscription either names an explicit address, or (when
             # email_addressID is None) falls back to the person's
             # PREFERRED address.
             Or(
                 And(MailingListSubscription.email_addressID == None,
                     EmailAddress.status == EmailAddressStatus.PREFERRED),
                 EmailAddress.id ==
                 MailingListSubscription.email_addressID),
         ))
     # Sort by team name.
     by_team = collections.defaultdict(set)
     for email, display_name, team_name in preferred:
         assert team_name in team_names, (
             'Unexpected team name in results: %s' % team_name)
         value = (display_name, email.lower())
         by_team[team_name].add(value)
     # Turn the results into a mapping of lists.
     results = {}
     for team_name, address_set in by_team.items():
         results[team_name] = list(address_set)
     return results
示例#14
0
    def getPeopleWithoutAccess(self, concrete_artifact, people):
        """See `ISharingService`."""
        # Public artifacts allow everyone to have access.
        # NOTE(review): this branch returns a list while the normal path
        # returns a set — callers presumably only iterate; confirm.
        access_artifacts = list(
            getUtility(IAccessArtifactSource).find([concrete_artifact]))
        if not access_artifacts:
            return []

        access_artifact = access_artifacts[0]
        # The two Selects below are subquery expressions; they are not
        # executed here but embedded in the In() clauses of the main query.
        # Determine the grantees who have access via an access policy grant.
        policy_grantees = (
            Select(
                (AccessPolicyGrant.grantee_id,),
                where=And(
                    AccessPolicyArtifact.abstract_artifact == access_artifact,
                    AccessPolicyGrant.policy_id ==
                        AccessPolicyArtifact.policy_id)))

        # Determine the grantees who have access via an access artifact grant.
        artifact_grantees = (
            Select(
                (AccessArtifactGrant.grantee_id,),
                where=And(
                    AccessArtifactGrant.abstract_artifact_id ==
                        access_artifact.id)))

        # Find the people who can see the artifacts.
        # Grants may be made to teams; TeamParticipation maps each person
        # to the teams they belong to, so a grant to any such team counts.
        person_ids = [person.id for person in people]
        store = IStore(AccessArtifactGrant)
        tables = [
            Person,
            Join(TeamParticipation, TeamParticipation.personID == Person.id)]
        result_set = store.using(*tables).find(
            Person,
            Or(
                In(TeamParticipation.teamID, policy_grantees),
                In(TeamParticipation.teamID, artifact_grantees)),
            In(Person.id, person_ids))

        # Whoever was not matched above lacks access.
        return set(people).difference(set(result_set))
示例#15
0
    def getSourceFiles(self, distroseries, pocket):
        """Fetch publishing information about all published source files.

        The publishing information consists of tuples with 'sourcename',
        'filename' and 'component' strings, in this order.

        :param distroseries: target `IDistroSeries`
        :param pocket: target `PackagePublishingPocket`

        :return: a `ResultSet` with the source files information tuples.
        """
        store = IStore(SourcePackagePublishingHistory)
        columns = (
            SourcePackageFilePublishing.sourcepackagename,
            SourcePackageFilePublishing.libraryfilealiasfilename,
            SourcePackageFilePublishing.componentname)
        conditions = [
            SourcePackageFilePublishing.distribution == self.distro,
            SourcePackageFilePublishing.archive == self.publisher.archive,
            SourcePackageFilePublishing.distroseriesname == distroseries.name,
            SourcePackageFilePublishing.pocket == pocket,
            SourcePackageFilePublishing.publishingstatus ==
                PackagePublishingStatus.PUBLISHED,
        ]
        result_set = store.using(SourcePackageFilePublishing).find(
            columns, *conditions)
        # Newest publishing records first.
        return result_set.order_by(Desc(SourcePackageFilePublishing.id))
示例#16
0
    def getSourceFiles(self, distroseries, pocket):
        """Fetch publishing information about all published source files.

        The publishing information consists of tuples with 'sourcename',
        'filename' and 'component' strings, in this order.

        :param distroseries: target `IDistroSeries`
        :param pocket: target `PackagePublishingPocket`

        :return: a `ResultSet` with the source files information tuples.
        """
        store = IStore(SourcePackagePublishingHistory)
        result_set = store.using(SourcePackageFilePublishing).find(
            (SourcePackageFilePublishing.sourcepackagename,
             SourcePackageFilePublishing.libraryfilealiasfilename,
             SourcePackageFilePublishing.componentname),
            SourcePackageFilePublishing.distribution == self.distro,
            SourcePackageFilePublishing.archive == self.publisher.archive,
            SourcePackageFilePublishing.distroseriesname == distroseries.name,
            SourcePackageFilePublishing.pocket == pocket,
            SourcePackageFilePublishing.publishingstatus ==
            PackagePublishingStatus.PUBLISHED)

        # Newest publishing records first.
        return result_set.order_by(Desc(SourcePackageFilePublishing.id))
示例#17
0
class Collection(object):
    """An arbitrary collection of database objects.

    Works as a Storm wrapper: create a collection based on another
    collection, adding joins and select conditions to taste.

    As in any Storm query, you can select any mix of classes and
    individual columns or other Storm expressions.
    """

    # Default table for this collection that will always be included.
    # Derived collection classes can use this to say what type they are
    # a collection of.
    starting_table = None

    def __init__(self, *args, **kwargs):
        """Construct a collection, possibly based on another one.

        :param base: Optional collection that this collection is based
            on.  The new collection will inherit its configuration.
        :param conditions: Optional Storm select conditions, e.g.
            `MyClass.attribute > 2`.
        :param classes: A class, or tuple or list of classes, that
            should go into the "FROM" clause of the new collection.
            This need not include classes that are already in the
            base collection, or that are included as outer joins.
        :param store: Optional: Storm `Store` to use.
        """
        starting_tables = []

        if len(args) >= 1 and isinstance(args[0], Collection):
            # There's a base collection.
            base = args[0]
            conditions = args[1:]
        else:
            # We're starting a fresh collection.
            base = None
            conditions = args
            if self.starting_table is not None:
                starting_tables = [self.starting_table]

        self.base = base

        if base is None:
            # `(True, )` is a harmless always-true condition so that the
            # tuple concatenation below works with no base conditions.
            base_conditions = (True, )
            base_tables = []
        else:
            base_conditions = base.conditions
            base_tables = list(base.tables)

        # Store precedence: an explicit `store` keyword wins, then the
        # base collection's store, then the default main store.  (The
        # previous code unconditionally clobbered the inherited base
        # store with the default whenever no `store` keyword was given.)
        store = kwargs.get('store')
        if store is not None:
            self.store = store
        elif base is not None:
            self.store = base.store
        else:
            from lp.services.librarian.model import LibraryFileAlias
            self.store = IStore(LibraryFileAlias)

        self.tables = (starting_tables + base_tables +
                       self._parseTablesArg(kwargs.get('tables', [])))

        self.conditions = base_conditions + conditions

    def refine(self, *args, **kwargs):
        """Return a copy of self with further restrictions, tables etc."""
        cls = self.__class__
        return cls(self, *args, **kwargs)

    def _parseTablesArg(self, tables):
        """Turn tables argument into a list.

        :param tables: A class, or tuple of classes, or list of classes.
        :param return: All classes that were passed in, as a list.
        """
        if isinstance(tables, tuple):
            return list(tables)
        elif isinstance(tables, list):
            return tables
        else:
            return [tables]

    def use(self, store):
        """Return a copy of this collection that uses the given store."""
        return self.refine(store=store)

    def joinInner(self, cls, *conditions):
        """Convenience method: inner-join `cls` into the query.

        This is equivalent to creating a `Collection` based on this one
        but with `cls` and `conditions` added.
        """
        return self.refine(tables=[Join(cls, *conditions)])

    def joinOuter(self, cls, *conditions):
        """Outer-join `cls` into the query."""
        return self.refine(tables=[LeftJoin(cls, *conditions)])

    def select(self, *values):
        """Return a result set containing the requested `values`.

        If no values are requested, this selects the type of object that
        the Collection is a collection of.
        """
        if len(self.tables) == 0:
            source = self.store
        else:
            source = self.store.using(*self.tables)

        if len(values) > 1:
            # Selecting a tuple of values.  Pass it to Storm unchanged.
            pass
        elif len(values) == 1:
            # One value requested.  Unpack for convenience.
            values = values[0]
        else:
            # Select the starting table by default.
            assert self.starting_table is not None, (
                "Collection %s does not define a starting table." %
                self.__class__.__name__)
            values = self.starting_table

        return source.find(values, *self.conditions)
示例#18
0
 def getSenderAddresses(self, team_names):
     """See `IMailingListSet`.

     Returns a dict mapping each team name to a list of
     (display name, lowercased email) tuples for everyone allowed to
     post to that team's mailing list.
     """
     store = IStore(MailingList)
     # First, we need to find all the members of all the mailing lists for
     # the given teams.  Find all of their validated and preferred email
     # addresses of those team members.  Every one of those email addresses
     # are allowed to post to the mailing list.
     # Alias Person so the team row and the member's Person row can both
     # appear in the same join.
     Team = ClassAlias(Person)
     tables = (
         Person,
         Join(Account, Account.id == Person.accountID),
         Join(EmailAddress, EmailAddress.personID == Person.id),
         Join(TeamParticipation, TeamParticipation.personID == Person.id),
         Join(MailingList, MailingList.teamID == TeamParticipation.teamID),
         Join(Team, Team.id == MailingList.teamID),
     )
     team_ids, list_ids = self._getTeamIdsAndMailingListIds(team_names)
     team_members = store.using(*tables).find(
         (Team.name, Person.display_name, EmailAddress.email),
         And(
             TeamParticipation.teamID.is_in(team_ids),
             MailingList.status != MailingListStatus.INACTIVE,
             # NOTE(review): teamowner == None appears to restrict the
             # members to individual people rather than nested teams —
             # confirm.  (Storm requires "== None", not "is None".)
             Person.teamowner == None,
             EmailAddress.status.is_in(EMAIL_ADDRESS_STATUSES),
             Account.status == AccountStatus.ACTIVE,
         ))
     # Second, find all of the email addresses for all of the people who
     # have been explicitly approved for posting to the team mailing lists.
     # This occurs as part of first post moderation, but since they've
     # already been approved for the specific list, we don't need to wait
     # for three global approvals.
     tables = (
         Person,
         Join(Account, Account.id == Person.accountID),
         Join(EmailAddress, EmailAddress.personID == Person.id),
         Join(MessageApproval, MessageApproval.posted_byID == Person.id),
         Join(MailingList,
              MailingList.id == MessageApproval.mailing_listID),
         Join(Team, Team.id == MailingList.teamID),
     )
     approved_posters = store.using(*tables).find(
         (Team.name, Person.display_name, EmailAddress.email),
         And(
             MessageApproval.mailing_listID.is_in(list_ids),
             MessageApproval.status.is_in(MESSAGE_APPROVAL_STATUSES),
             EmailAddress.status.is_in(EMAIL_ADDRESS_STATUSES),
             Account.status == AccountStatus.ACTIVE,
         ))
     # Sort allowed posters by team/mailing list.
     by_team = collections.defaultdict(set)
     # union() produces a SQL UNION, so rows appearing in both result
     # sets are collapsed.
     all_posters = team_members.union(approved_posters)
     for team_name, person_displayname, email in all_posters:
         assert team_name in team_names, (
             'Unexpected team name in results: %s' % team_name)
         value = (person_displayname, email.lower())
         by_team[team_name].add(value)
     # Turn the results into a mapping of lists.
     results = {}
     for team_name, address_set in by_team.items():
         results[team_name] = list(address_set)
     return results
示例#19
0
def search_specifications(context,
                          base_clauses,
                          user,
                          sort=None,
                          quantity=None,
                          spec_filter=None,
                          tables=None,
                          default_acceptance=False,
                          need_people=True,
                          need_branches=True,
                          need_workitems=False):
    """Search for specifications matching the given criteria.

    :param context: Context used to restrict the search to active products
        (see `get_specification_active_product_filter`).
    :param base_clauses: A list of Storm conditions defining the search.
        Not mutated by this function.
    :param user: The user performing the search; drives the privacy filter
        and, for non-admins, view-permission caching.
    :param sort: A `SpecificationSort` value, a Storm order expression, or
        None for the default (priority descending).
    :param quantity: Optional limit on the number of results.
    :param spec_filter: Optional list of `SpecificationFilter` values.
        Not mutated by this function.
    :param tables: Optional extra FROM-clause tables.  Defaults to
        `[Specification]`.  Not mutated by this function.  (The default
        used to be a shared mutable `[]`; it is now None.)
    :param default_acceptance: If True, default the filter set to
        ACCEPTED/DECLINED/PROPOSED semantics instead of
        COMPLETE/INCOMPLETE.
    :param need_people: Preload assignee/approver/drafter persons.
    :param need_branches: Preload linked branches.
    :param need_workitems: Preload work items.
    :return: A `DecoratedResultSet` of `Specification`.
    """
    store = IStore(Specification)
    if not default_acceptance:
        default = SpecificationFilter.INCOMPLETE
        options = set(
            [SpecificationFilter.COMPLETE, SpecificationFilter.INCOMPLETE])
    else:
        default = SpecificationFilter.ACCEPTED
        options = set([
            SpecificationFilter.ACCEPTED, SpecificationFilter.DECLINED,
            SpecificationFilter.PROPOSED
        ])
    # Work on copies so the caller's spec_filter, tables and base_clauses
    # lists are never mutated as a side effect of searching.
    if not spec_filter:
        spec_filter = [default]
    else:
        spec_filter = list(spec_filter)

    if not set(spec_filter) & options:
        spec_filter.append(default)

    if not tables:
        tables = [Specification]
    else:
        tables = list(tables)
    clauses = list(base_clauses)
    product_tables, product_clauses = get_specification_active_product_filter(
        context)
    tables.extend(product_tables)
    clauses.extend(product_clauses)
    # If there are any base or product clauses, they typically have good
    # selectivity, so use a CTE to force PostgreSQL to calculate them
    # up-front rather than doing a sequential scan for visible
    # specifications.
    if clauses:
        RelevantSpecification = Table('RelevantSpecification')
        relevant_specification_cte = With(
            RelevantSpecification.name,
            Select(Specification.id, And(clauses), tables=tables))
        store = store.with_(relevant_specification_cte)
        # From here on, the only condition is membership of the CTE.
        tables = [Specification]
        clauses = [
            Specification.id.is_in(Select(Column('id',
                                                 RelevantSpecification))),
        ]
    clauses.extend(get_specification_privacy_filter(user))
    clauses.extend(get_specification_filters(spec_filter))

    # Sort by priority descending, by default.
    if sort is None or sort == SpecificationSort.PRIORITY:
        order = [
            Desc(Specification.priority), Specification.definition_status,
            Specification.name
        ]
    elif sort == SpecificationSort.DATE:
        if SpecificationFilter.COMPLETE in spec_filter:
            # If we are showing completed, we care about date completed.
            order = [Desc(Specification.date_completed), Specification.id]
        else:
            # If not specially looking for complete, we care about date
            # registered.
            order = []
            show_proposed = set(
                [SpecificationFilter.ALL, SpecificationFilter.PROPOSED])
            if default_acceptance and not (set(spec_filter) & show_proposed):
                order.append(Desc(Specification.date_goal_decided))
            order.extend([Desc(Specification.datecreated), Specification.id])
    else:
        order = [sort]

    # Set the _known_viewers property for each specification, as well as
    # preloading the objects involved, if asked.
    def preload_hook(rows):
        person_ids = set()
        work_items_by_spec = defaultdict(list)
        for spec in rows:
            if need_people:
                person_ids |= set(
                    [spec._assigneeID, spec._approverID, spec._drafterID])
            if need_branches:
                get_property_cache(spec).linked_branches = []
        if need_workitems:
            work_items = load_referencing(
                SpecificationWorkItem,
                rows, ['specification_id'],
                extra_conditions=[SpecificationWorkItem.deleted == False])
            for workitem in work_items:
                person_ids.add(workitem.assignee_id)
                work_items_by_spec[workitem.specification_id].append(workitem)
        # Any of the preloaded ID attributes may be None; drop it.
        person_ids -= set([None])
        if need_people:
            list(
                getUtility(IPersonSet).getPrecachedPersonsFromIDs(
                    person_ids, need_validity=True))
        if need_workitems:
            for spec in rows:
                get_property_cache(spec).work_items = sorted(
                    work_items_by_spec[spec.id], key=lambda wi: wi.sequence)
        if need_branches:
            spec_branches = load_referencing(SpecificationBranch, rows,
                                             ['specificationID'])
            for sbranch in spec_branches:
                spec_cache = get_property_cache(sbranch.specification)
                spec_cache.linked_branches.append(sbranch)

    decorators = []
    if user is not None and not IPersonRoles(user).in_admin:
        decorators.append(_make_cache_user_can_view_spec(user))
    results = store.using(*tables).find(
        Specification, *clauses).order_by(*order).config(limit=quantity)
    return DecoratedResultSet(
        results,
        lambda row: reduce(lambda task, dec: dec(task), decorators, row),
        pre_iter_hook=preload_hook)
    def getExtendedRevisionDetails(self, user, revisions):
        """See `IBranchCollection`.

        For each branch revision, find the MERGED merge proposal (if any)
        that landed at that revision, plus the bug tasks linked to that
        proposal's source branch that are visible to `user`.
        """

        if not revisions:
            return []
        # NOTE(review): only the first revision's branch is consulted, so
        # all revisions are presumably on the same branch — confirm callers
        # guarantee this.
        branch = revisions[0].branch

        def make_rev_info(
                branch_revision, merge_proposal_revs, linked_bugtasks):
            # Assemble the per-revision result dict; 'linked_bugtasks'
            # stays None unless a merged proposal exists for the revision.
            rev_info = {
                'revision': branch_revision,
                'linked_bugtasks': None,
                'merge_proposal': None,
                }
            merge_proposal = merge_proposal_revs.get(branch_revision.sequence)
            rev_info['merge_proposal'] = merge_proposal
            if merge_proposal is not None:
                rev_info['linked_bugtasks'] = linked_bugtasks.get(
                    merge_proposal.source_branch.id)
            return rev_info

        rev_nos = [revision.sequence for revision in revisions]
        merge_proposals = self.getMergeProposals(
                target_branch=branch, merged_revnos=rev_nos,
                statuses=[BranchMergeProposalStatus.MERGED])
        # Map merged revision number -> merge proposal.
        merge_proposal_revs = dict(
                [(mp.merged_revno, mp) for mp in merge_proposals])
        source_branch_ids = [mp.source_branch.id for mp in merge_proposals]
        linked_bugtasks = defaultdict(list)

        if source_branch_ids:
            # We get the bugtasks for our merge proposal branches

            # First, the bug ids
            # (`any` here is the search-criterion helper, not the builtin —
            # the builtin takes a single iterable, not *args.)
            params = BugTaskSearchParams(
                user=user, status=None,
                linked_branches=any(*source_branch_ids))
            bug_ids = getUtility(IBugTaskSet).searchBugIds(params)

            # Then the bug tasks and branches
            store = IStore(BugBranch)
            rs = store.using(
                BugBranch,
                Join(BugTask, BugTask.bugID == BugBranch.bugID),
            ).find(
                (BugTask, BugBranch),
                BugBranch.bugID.is_in(bug_ids),
                BugBranch.branchID.is_in(source_branch_ids)
            )

            # Build up a collection of bugtasks for each branch
            bugtasks_for_branch = defaultdict(list)
            for bugtask, bugbranch in rs:
                bugtasks_for_branch[bugbranch.branch].append(bugtask)

            # Now filter those down to one bugtask per branch
            # (iteritems: this module is Python 2 code).
            for branch, tasks in bugtasks_for_branch.iteritems():
                linked_bugtasks[branch.id].extend(
                    filter_bugtasks_by_context(branch.target.context, tasks))

        return [make_rev_info(rev, merge_proposal_revs, linked_bugtasks)
                for rev in revisions]
示例#21
0
    def getExtendedRevisionDetails(self, user, revisions):
        """See `IBranchCollection`.

        Return, for each revision, a dict with the revision, its merged
        merge proposal (if any), and the bug tasks visible to `user` that
        are linked to that proposal's source branch.
        """

        if not revisions:
            return []
        # NOTE(review): assumes every revision is on the same branch as the
        # first one — verify against callers.
        branch = revisions[0].branch

        def make_rev_info(branch_revision, merge_proposal_revs,
                          linked_bugtasks):
            # Build the result dict for one revision; bug tasks are only
            # attached when a merged proposal matches its sequence number.
            rev_info = {
                'revision': branch_revision,
                'linked_bugtasks': None,
                'merge_proposal': None,
            }
            merge_proposal = merge_proposal_revs.get(branch_revision.sequence)
            rev_info['merge_proposal'] = merge_proposal
            if merge_proposal is not None:
                rev_info['linked_bugtasks'] = linked_bugtasks.get(
                    merge_proposal.source_branch.id)
            return rev_info

        rev_nos = [revision.sequence for revision in revisions]
        merge_proposals = self.getMergeProposals(
            target_branch=branch,
            merged_revnos=rev_nos,
            statuses=[BranchMergeProposalStatus.MERGED])
        # Map merged revision number -> merge proposal for quick lookup.
        merge_proposal_revs = dict([(mp.merged_revno, mp)
                                    for mp in merge_proposals])
        source_branch_ids = [mp.source_branch.id for mp in merge_proposals]
        linked_bugtasks = defaultdict(list)

        if source_branch_ids:
            # We get the bugtasks for our merge proposal branches

            # First, the bug ids
            # (`any` is the search-criterion helper, not the builtin: the
            # builtin does not accept *args.)
            params = BugTaskSearchParams(
                user=user,
                status=None,
                linked_branches=any(*source_branch_ids))
            bug_ids = getUtility(IBugTaskSet).searchBugIds(params)

            # Then the bug tasks and branches
            store = IStore(BugBranch)
            rs = store.using(
                BugBranch,
                Join(BugTask, BugTask.bugID == BugBranch.bugID),
            ).find((BugTask, BugBranch), BugBranch.bugID.is_in(bug_ids),
                   BugBranch.branchID.is_in(source_branch_ids))

            # Build up a collection of bugtasks for each branch
            bugtasks_for_branch = defaultdict(list)
            for bugtask, bugbranch in rs:
                bugtasks_for_branch[bugbranch.branch].append(bugtask)

            # Now filter those down to one bugtask per branch
            # (iteritems: Python 2 idiom, consistent with this module).
            for branch, tasks in bugtasks_for_branch.iteritems():
                linked_bugtasks[branch.id].extend(
                    filter_bugtasks_by_context(branch.target.context, tasks))

        return [
            make_rev_info(rev, merge_proposal_revs, linked_bugtasks)
            for rev in revisions
        ]
示例#22
0
    def _findBuildCandidate(self):
        """Find a candidate job for dispatch to an idle buildd slave.

        The pending BuildQueue item with the highest score for this builder
        or None if no candidate is available.

        Candidates must be WAITING, unassigned, match this builder's
        virtualization setting, and either match one of its processors or
        have no processor requirement; each job type may veto further via
        its own selection criteria.

        :return: A candidate job.
        """
        logger = self._getSlaveScannerLogger()

        # Let each build farm job type contribute extra SQL restricting its
        # own jobs; jobs of other types pass through unaffected (hence the
        # Or with job_type !=).
        job_type_conditions = []
        job_sources = specific_build_farm_job_sources()
        for job_type, job_source in job_sources.iteritems():
            query = job_source.addCandidateSelectionCriteria(
                self.processor, self.virtualized)
            if query:
                job_type_conditions.append(
                    Or(BuildFarmJob.job_type != job_type, Exists(SQL(query))))

        def get_int_feature_flag(flag):
            # Parse an integer feature flag; log and return None on a
            # malformed value instead of aborting candidate selection.
            value_str = getFeatureFlag(flag)
            if value_str is not None:
                try:
                    return int(value_str)
                except ValueError:
                    logger.error('invalid %s %r', flag, value_str)

        score_conditions = []
        minimum_scores = set()
        for processor in self.processors:
            minimum_scores.add(
                get_int_feature_flag('buildmaster.minimum_score.%s' %
                                     processor.name))
        minimum_scores.add(get_int_feature_flag('buildmaster.minimum_score'))
        minimum_scores.discard(None)
        # If there are minimum scores set for any of the processors
        # supported by this builder, use the highest of them.  This is a bit
        # weird and not completely ideal, but it's a safe conservative
        # option and avoids substantially complicating the candidate query.
        if minimum_scores:
            score_conditions.append(
                BuildQueue.lastscore >= max(minimum_scores))

        store = IStore(self.__class__)
        candidate_jobs = store.using(BuildQueue, BuildFarmJob).find(
            (BuildQueue.id, ),
            BuildFarmJob.id == BuildQueue._build_farm_job_id,
            BuildQueue.status == BuildQueueStatus.WAITING,
            Or(
                BuildQueue.processorID.is_in(
                    Select(BuilderProcessor.processor_id,
                           tables=[BuilderProcessor],
                           where=BuilderProcessor.builder == self)),
                BuildQueue.processor == None),
            BuildQueue.virtualized == self.virtualized,
            BuildQueue.builder == None,
            And(*(job_type_conditions + score_conditions))).order_by(
                Desc(BuildQueue.lastscore), BuildQueue.id)

        # Only try the first handful of jobs. It's much easier on the
        # database, the chance of a large prefix of the queue being
        # bad candidates is negligible, and we want reasonably bounded
        # per-cycle performance even if the prefix is large.
        for (candidate_id, ) in candidate_jobs[:10]:
            candidate = getUtility(IBuildQueueSet).get(candidate_id)
            job_source = job_sources[removeSecurityProxy(
                candidate)._build_farm_job.job_type]
            # The job type gets a final say on each candidate.
            candidate_approved = job_source.postprocessCandidate(
                candidate, logger)
            if candidate_approved:
                return candidate

        return None
    def getRecipientFilterData(self, bug, recipient_to_sources, notifications):
        """See `IBugNotificationSet`.

        Return a dict mapping each non-muted recipient to its notification
        sources and the descriptions of the subscription filters that
        matched, honouring both whole-bug mutes and per-filter mutes.
        """
        if not notifications or not recipient_to_sources:
            # This is a shortcut that will remove some error conditions.
            return {}
        # Collect bug mute information.
        from lp.bugs.model.bug import BugMute
        store = IStore(BugMute)
        muted_person_ids = set(
            list(store.find(BugMute.person_id, BugMute.bug == bug)))
        # This makes two calls to the database to get all the
        # information we need. The first call gets the filter ids and
        # descriptions for each recipient, and then we divide up the
        # information per recipient.
        # First we get some intermediate data structures set up.
        source_person_id_map = {}
        recipient_id_map = {}
        for recipient, sources in recipient_to_sources.items():
            # Recipients who muted the whole bug are dropped entirely.
            if recipient.id in muted_person_ids:
                continue
            source_person_ids = set()
            recipient_id_map[recipient.id] = {
                'principal': recipient,
                'filters': {},
                'source person ids': source_person_ids,
                'sources': sources,
            }
            for source in sources:
                person_id = source.person.id
                source_person_ids.add(person_id)
                data = source_person_id_map.get(person_id)
                if data is None:
                    # The "filters" key is the only one we actually use.  The
                    # rest are useful for debugging and introspecting.
                    data = {
                        'sources': set(),
                        'person': source.person,
                        'filters': {}
                    }
                    source_person_id_map[person_id] = data
                data['sources'].add(source)
        # Now we actually look for the filters.
        store = IStore(BugSubscriptionFilter)
        source = store.using(
            BugSubscriptionFilter,
            Join(
                BugNotificationFilter, BugSubscriptionFilter.id ==
                BugNotificationFilter.bug_subscription_filter_id),
            Join(
                StructuralSubscription,
                BugSubscriptionFilter.structural_subscription_id ==
                StructuralSubscription.id))
        if len(source_person_id_map) == 0:
            filter_data = []
        else:
            filter_data = source.find(
                (StructuralSubscription.subscriberID, BugSubscriptionFilter.id,
                 BugSubscriptionFilter.description),
                In(BugNotificationFilter.bug_notification_id,
                   [notification.id for notification in notifications]),
                In(StructuralSubscription.subscriberID,
                   source_person_id_map.keys()))
        filter_ids = []
        # Record the filters for each source.
        for source_person_id, filter_id, filter_description in filter_data:
            source_person_id_map[source_person_id]['filters'][filter_id] = (
                filter_description)
            filter_ids.append(filter_id)

        # This is only necessary while production and sample data have
        # structural subscriptions without filters.  Assign the filters to
        # each recipient.
        no_filter_marker = -1

        for recipient_data in recipient_id_map.values():
            for source_person_id in recipient_data['source person ids']:
                recipient_data['filters'].update(
                    source_person_id_map[source_person_id]['filters']
                    or {no_filter_marker: None})
        if filter_ids:
            # Now we get the information about subscriptions that might be
            # filtered and take that into account.
            mute_data = store.find((BugSubscriptionFilterMute.person_id,
                                    BugSubscriptionFilterMute.filter_id),
                                   In(BugSubscriptionFilterMute.person_id,
                                      recipient_id_map.keys()),
                                   In(BugSubscriptionFilterMute.filter_id,
                                      filter_ids))
            for person_id, filter_id in mute_data:
                if filter_id in recipient_id_map[person_id]['filters']:
                    del recipient_id_map[person_id]['filters'][filter_id]
                # This may look odd, but it's here to prevent members of
                # a team with a contact address still getting direct
                # email about a bug after they've muted the
                # subscription.
                if no_filter_marker in recipient_id_map[person_id]['filters']:
                    del recipient_id_map[person_id]['filters'][
                        no_filter_marker]
        # Now recipient_id_map has all the information we need.  Let's
        # build the final result and return it.
        result = {}
        for recipient_data in recipient_id_map.values():
            # Recipients whose filters were all muted away get no entry.
            if recipient_data['filters']:
                filter_descriptions = [
                    description
                    for description in recipient_data['filters'].values()
                    if description
                ]
                filter_descriptions.sort()  # This is good for tests.
                result[recipient_data['principal']] = {
                    'sources': recipient_data['sources'],
                    'filter descriptions': filter_descriptions
                }
        return result
示例#24
0
class Collection(object):
    """An arbitrary collection of database objects.

    Works as a Storm wrapper: create a collection based on another
    collection, adding joins and select conditions to taste.

    As in any Storm query, you can select any mix of classes and
    individual columns or other Storm expressions.
    """

    # Default table for this collection that will always be included.
    # Derived collection classes can use this to say what type they are
    # a collection of.
    starting_table = None

    def __init__(self, *args, **kwargs):
        """Construct a collection, possibly based on another one.

        :param base: Optional collection that this collection is based
            on.  The new collection will inherit its configuration.
        :param conditions: Optional Storm select conditions, e.g.
            `MyClass.attribute > 2`.
        :param classes: A class, or tuple or list of classes, that
            should go into the "FROM" clause of the new collection.
            This need not include classes that are already in the
            base collection, or that are included as outer joins.
        :param store: Optional: Storm `Store` to use.
        """
        starting_tables = []

        if len(args) >= 1 and isinstance(args[0], Collection):
            # There's a base collection.
            base = args[0]
            conditions = args[1:]
        else:
            # We're starting a fresh collection.
            base = None
            conditions = args
            if self.starting_table is not None:
                starting_tables = [self.starting_table]

        self.base = base

        if base is None:
            base_conditions = (True, )
            base_tables = []
        else:
            base_conditions = base.conditions
            base_tables = list(base.tables)

        self.store = kwargs.get('store')
        if self.store is None:
            if base is not None:
                # Inherit the base collection's store, as the docstring
                # promises.  (Previously this assignment was clobbered by
                # the kwargs lookup, so derived collections silently fell
                # back to the default store below.)
                self.store = base.store
            else:
                from lp.services.librarian.model import LibraryFileAlias
                self.store = IStore(LibraryFileAlias)

        self.tables = (
            starting_tables + base_tables +
            self._parseTablesArg(kwargs.get('tables', [])))

        self.conditions = base_conditions + conditions

    def refine(self, *args, **kwargs):
        """Return a copy of self with further restrictions, tables etc."""
        cls = self.__class__
        return cls(self, *args, **kwargs)

    def _parseTablesArg(self, tables):
        """Turn tables argument into a list.

        :param tables: A class, or tuple of classes, or list of classes.
        :param return: All classes that were passed in, as a list.
        """
        if isinstance(tables, tuple):
            return list(tables)
        elif isinstance(tables, list):
            return tables
        else:
            return [tables]

    def use(self, store):
        """Return a copy of this collection that uses the given store."""
        return self.refine(store=store)

    def joinInner(self, cls, *conditions):
        """Convenience method: inner-join `cls` into the query.

        This is equivalent to creating a `Collection` based on this one
        but with `cls` and `conditions` added.
        """
        return self.refine(tables=[Join(cls, *conditions)])

    def joinOuter(self, cls, *conditions):
        """Outer-join `cls` into the query."""
        return self.refine(tables=[LeftJoin(cls, *conditions)])

    def select(self, *values):
        """Return a result set containing the requested `values`.

        If no values are requested, this selects the type of object that
        the Collection is a collection of.
        """
        if len(self.tables) == 0:
            source = self.store
        else:
            source = self.store.using(*self.tables)

        if len(values) > 1:
            # Selecting a tuple of values.  Pass it to Storm unchanged.
            pass
        elif len(values) == 1:
            # One value requested.  Unpack for convenience.
            values = values[0]
        else:
            # Select the starting table by default.
            assert self.starting_table is not None, (
                "Collection %s does not define a starting table." %
                    self.__class__.__name__)
            values = self.starting_table

        return source.find(values, *self.conditions)
def search_specifications(context, base_clauses, user, sort=None,
                          quantity=None, spec_filter=None, tables=None,
                          default_acceptance=False, need_people=True,
                          need_branches=True, need_workitems=False):
    """Search for specifications matching the given criteria.

    :param context: Context used to restrict the search to active products
        (see `get_specification_active_product_filter`).
    :param base_clauses: A list of Storm conditions defining the search.
        Not mutated by this function.
    :param user: The searching user; drives privacy filtering and, for
        non-admins, view-permission caching.
    :param sort: A `SpecificationSort` value, a Storm order expression, or
        None for the default (priority descending).
    :param quantity: Optional limit on the number of results.
    :param spec_filter: Optional list of `SpecificationFilter` values.
        Not mutated by this function.
    :param tables: Optional extra FROM-clause tables.  Defaults to
        `[Specification]`.  Not mutated by this function.  (The default
        used to be a shared mutable `[]`; it is now None.)
    :param default_acceptance: If True, default the filter set to the
        ACCEPTED/DECLINED/PROPOSED family instead of COMPLETE/INCOMPLETE.
    :param need_people: Preload assignee/approver/drafter persons.
    :param need_branches: Preload linked branches.
    :param need_workitems: Preload work items.
    :return: A `DecoratedResultSet` of `Specification`.
    """
    store = IStore(Specification)
    if not default_acceptance:
        default = SpecificationFilter.INCOMPLETE
        options = set([
            SpecificationFilter.COMPLETE, SpecificationFilter.INCOMPLETE])
    else:
        default = SpecificationFilter.ACCEPTED
        options = set([
            SpecificationFilter.ACCEPTED, SpecificationFilter.DECLINED,
            SpecificationFilter.PROPOSED])
    # Work on copies so the caller's spec_filter, tables and base_clauses
    # lists are never mutated as a side effect of searching.
    if not spec_filter:
        spec_filter = [default]
    else:
        spec_filter = list(spec_filter)

    if not set(spec_filter) & options:
        spec_filter.append(default)

    if not tables:
        tables = [Specification]
    else:
        tables = list(tables)
    clauses = list(base_clauses)
    product_table, product_clauses = get_specification_active_product_filter(
        context)
    tables.extend(product_table)
    for extend in (get_specification_privacy_filter(user),
        get_specification_filters(spec_filter), product_clauses):
        clauses.extend(extend)

    # Sort by priority descending, by default.
    if sort is None or sort == SpecificationSort.PRIORITY:
        order = [
            Desc(Specification.priority), Specification.definition_status,
            Specification.name]
    elif sort == SpecificationSort.DATE:
        if SpecificationFilter.COMPLETE in spec_filter:
            # If we are showing completed, we care about date completed.
            order = [Desc(Specification.date_completed), Specification.id]
        else:
            # If not specially looking for complete, we care about date
            # registered.
            order = []
            show_proposed = set(
                [SpecificationFilter.ALL, SpecificationFilter.PROPOSED])
            if default_acceptance and not (set(spec_filter) & show_proposed):
                order.append(Desc(Specification.date_goal_decided))
            order.extend([Desc(Specification.datecreated), Specification.id])
    else:
        order = [sort]

    # Set the _known_viewers property for each specification, as well as
    # preloading the objects involved, if asked.
    def preload_hook(rows):
        person_ids = set()
        work_items_by_spec = defaultdict(list)
        for spec in rows:
            if need_people:
                person_ids |= set(
                    [spec._assigneeID, spec._approverID, spec._drafterID])
            if need_branches:
                get_property_cache(spec).linked_branches = []
        if need_workitems:
            work_items = load_referencing(
                SpecificationWorkItem, rows, ['specification_id'],
                extra_conditions=[SpecificationWorkItem.deleted == False])
            for workitem in work_items:
                person_ids.add(workitem.assignee_id)
                work_items_by_spec[workitem.specification_id].append(workitem)
        # Any of the preloaded ID attributes may be None; drop it.
        person_ids -= set([None])
        if need_people:
            list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
                person_ids, need_validity=True))
        if need_workitems:
            for spec in rows:
                get_property_cache(spec).work_items = sorted(
                    work_items_by_spec[spec.id], key=lambda wi: wi.sequence)
        if need_branches:
            spec_branches = load_referencing(
                SpecificationBranch, rows, ['specificationID'])
            for sbranch in spec_branches:
                spec_cache = get_property_cache(sbranch.specification)
                spec_cache.linked_branches.append(sbranch)

    decorators = []
    if user is not None and not IPersonRoles(user).in_admin:
        decorators.append(_make_cache_user_can_view_spec(user))
    results = store.using(*tables).find(
        Specification, *clauses).order_by(*order).config(limit=quantity)
    return DecoratedResultSet(
        results,
        lambda row: reduce(lambda task, dec: dec(task), decorators, row),
        pre_iter_hook=preload_hook)
    def getRecipientFilterData(self, bug, recipient_to_sources,
                               notifications):
        """See `IBugNotificationSet`.

        Map each non-muted recipient to its notification sources and the
        descriptions of the subscription filters that matched, honouring
        both whole-bug mutes and per-filter mutes.
        """
        if not notifications or not recipient_to_sources:
            # This is a shortcut that will remove some error conditions.
            return {}
        # Collect bug mute information.
        from lp.bugs.model.bug import BugMute
        store = IStore(BugMute)
        muted_person_ids = set(list(
            store.find(BugMute.person_id,
                       BugMute.bug == bug)))
        # This makes two calls to the database to get all the
        # information we need. The first call gets the filter ids and
        # descriptions for each recipient, and then we divide up the
        # information per recipient.
        # First we get some intermediate data structures set up.
        source_person_id_map = {}
        recipient_id_map = {}
        for recipient, sources in recipient_to_sources.items():
            # Recipients who muted the whole bug are dropped entirely.
            if recipient.id in muted_person_ids:
                continue
            source_person_ids = set()
            recipient_id_map[recipient.id] = {
                'principal': recipient,
                'filters': {},
                'source person ids': source_person_ids,
                'sources': sources,
                }
            for source in sources:
                person_id = source.person.id
                source_person_ids.add(person_id)
                data = source_person_id_map.get(person_id)
                if data is None:
                    # The "filters" key is the only one we actually use.  The
                    # rest are useful for debugging and introspecting.
                    data = {'sources': set(),
                            'person': source.person,
                            'filters': {}}
                    source_person_id_map[person_id] = data
                data['sources'].add(source)
        # Now we actually look for the filters.
        store = IStore(BugSubscriptionFilter)
        source = store.using(
            BugSubscriptionFilter,
            Join(BugNotificationFilter,
                 BugSubscriptionFilter.id ==
                    BugNotificationFilter.bug_subscription_filter_id),
            Join(StructuralSubscription,
                 BugSubscriptionFilter.structural_subscription_id ==
                    StructuralSubscription.id))
        if len(source_person_id_map) == 0:
            filter_data = []
        else:
            filter_data = source.find(
                (StructuralSubscription.subscriberID,
                 BugSubscriptionFilter.id,
                 BugSubscriptionFilter.description),
                In(BugNotificationFilter.bug_notification_id,
                   [notification.id for notification in notifications]),
                In(StructuralSubscription.subscriberID,
                   source_person_id_map.keys()))
        filter_ids = []
        # Record the filters for each source.
        for source_person_id, filter_id, filter_description in filter_data:
            source_person_id_map[source_person_id]['filters'][filter_id] = (
                filter_description)
            filter_ids.append(filter_id)

        # This is only necessary while production and sample data have
        # structural subscriptions without filters.  Assign the filters to
        # each recipient.
        no_filter_marker = -1

        for recipient_data in recipient_id_map.values():
            for source_person_id in recipient_data['source person ids']:
                recipient_data['filters'].update(
                    source_person_id_map[source_person_id]['filters']
                    or {no_filter_marker: None})
        if filter_ids:
            # Now we get the information about subscriptions that might be
            # filtered and take that into account.
            mute_data = store.find(
                (BugSubscriptionFilterMute.person_id,
                 BugSubscriptionFilterMute.filter_id),
                In(BugSubscriptionFilterMute.person_id,
                   recipient_id_map.keys()),
                In(BugSubscriptionFilterMute.filter_id, filter_ids))
            for person_id, filter_id in mute_data:
                if filter_id in recipient_id_map[person_id]['filters']:
                    del recipient_id_map[person_id]['filters'][filter_id]
                # This may look odd, but it's here to prevent members of
                # a team with a contact address still getting direct
                # email about a bug after they've muted the
                # subscription.
                if no_filter_marker in recipient_id_map[person_id]['filters']:
                    del recipient_id_map[
                        person_id]['filters'][no_filter_marker]
        # Now recipient_id_map has all the information we need.  Let's
        # build the final result and return it.
        result = {}
        for recipient_data in recipient_id_map.values():
            # Recipients whose filters were all muted away get no entry.
            if recipient_data['filters']:
                filter_descriptions = [
                    description for description
                    in recipient_data['filters'].values() if description]
                filter_descriptions.sort()  # This is good for tests.
                result[recipient_data['principal']] = {
                    'sources': recipient_data['sources'],
                    'filter descriptions': filter_descriptions}
        return result