def lessThanOrGreaterThanExpression(self, expressions, memos):
    """Return an SQL expression "(expressions) OP (memos)".

    OP is >, if the elements of expressions are PropertyColumns; else the
    elements of expressions are instances of Desc(PropertyColumn) and OP
    is <.
    """
    descending = isinstance(expressions[0], Desc)
    if descending:
        expressions = [expression.expr for expression in expressions]
    expressions = map(compile, expressions)
    expressions = ', '.join(expressions)
    memos = ', '.join(sqlvalues(*memos))
    if descending:
        return SQL('(%s) < (%s)' % (expressions, memos))
    else:
        return SQL('(%s) > (%s)' % (expressions, memos))
def getUnlandedSourceBranchRevisions(self):
    """See `IBranchMergeProposal`."""
    store = Store.of(self)
    source = SQL("""source AS (SELECT BranchRevision.branch,
        BranchRevision.revision, BranchRevision.sequence FROM
        BranchRevision WHERE BranchRevision.branch = %s and
        BranchRevision.sequence IS NOT NULL ORDER BY
        BranchRevision.branch DESC, BranchRevision.sequence DESC
        LIMIT 10)""" % self.source_branch.id)
    where = SQL("""BranchRevision.revision NOT IN (SELECT revision from
        BranchRevision AS target where target.branch = %s and
        BranchRevision.revision = target.revision)""" %
        self.target_branch.id)
    using = SQL("""source as BranchRevision""")
    revisions = store.with_(source).using(using).find(
        BranchRevision, where)
    return list(revisions.order_by(
        Desc(BranchRevision.sequence)).config(limit=10))
def create(cls, branch_id):
    """See `IBranchDiffJobSource`."""
    metadata = {'branch_id': branch_id}
    # The branch_job has a branch of None, as there is no branch left in
    # the database to refer to.
    start = SQL("CURRENT_TIMESTAMP AT TIME ZONE 'UTC' + '7 days'")
    branch_job = BranchJob(
        None, cls.class_job_type, metadata, scheduled_start=start)
    return cls(branch_job)
def sample_binary_packages(self):
    """See IDistributionSourcePackageRelease."""
    # Avoid circular imports.
    from lp.registry.model.distroseries import DistroSeries
    from lp.soyuz.model.distroarchseries import DistroArchSeries
    from lp.soyuz.model.distroseriespackagecache import (
        DistroSeriesPackageCache)

    archive_ids = list(self.distribution.all_distro_archive_ids)
    result_row = (
        SQL('DISTINCT ON(BinaryPackageName.name) 0 AS ignore'),
        BinaryPackagePublishingHistory, DistroSeriesPackageCache,
        BinaryPackageRelease, BinaryPackageName)

    tables = (
        BinaryPackagePublishingHistory,
        Join(
            DistroArchSeries,
            DistroArchSeries.id ==
                BinaryPackagePublishingHistory.distroarchseriesID),
        Join(
            DistroSeries,
            DistroArchSeries.distroseriesID == DistroSeries.id),
        Join(
            BinaryPackageRelease,
            BinaryPackageRelease.id ==
                BinaryPackagePublishingHistory.binarypackagereleaseID),
        Join(
            BinaryPackageName,
            BinaryPackageName.id ==
                BinaryPackageRelease.binarypackagenameID),
        Join(
            BinaryPackageBuild,
            BinaryPackageBuild.id == BinaryPackageRelease.buildID),
        LeftJoin(
            DistroSeriesPackageCache,
            And(
                DistroSeriesPackageCache.distroseries == DistroSeries.id,
                DistroSeriesPackageCache.archiveID.is_in(archive_ids),
                DistroSeriesPackageCache.binarypackagename ==
                    BinaryPackageName.id)))

    all_published = Store.of(self.distribution).using(*tables).find(
        result_row,
        DistroSeries.distribution == self.distribution,
        BinaryPackagePublishingHistory.archiveID.is_in(archive_ids),
        BinaryPackageBuild.source_package_release ==
            self.sourcepackagerelease)
    all_published = all_published.order_by(BinaryPackageName.name)

    def make_dsb_package(row):
        publishing = row[1]
        package_cache = row[2]
        return DistroSeriesBinaryPackage(
            publishing.distroarchseries.distroseries,
            publishing.binarypackagerelease.binarypackagename,
            package_cache)

    return DecoratedResultSet(all_published, make_dsb_package)
def resetWatches(self, new_next_check=None):
    """See `IBugTracker`."""
    if new_next_check is None:
        new_next_check = SQL(
            "now() at time zone 'UTC' + (random() * interval '1 day')")
    store = Store.of(self)
    store.find(BugWatch, BugWatch.bugtracker == self).set(
        next_check=new_next_check, lastchecked=None, last_error_type=None)
def table_exists(self, table_name):
    """Check if a table exists.

    :param table_name: name of the table to check for
    :returns: ``True`` if the table exists
    """
    res = self.execute(
        SQL("SELECT COUNT(relname) FROM pg_class WHERE relname = ?",
            # FIXME: Figure out why this is not coming as unicode.
            (unicode(table_name), )))
    return res.get_one()[0]
def _populatePermissionsCache(cls, permissions_cache,
                              shared_artifact_info_types, grantee_ids,
                              policies_by_id, persons_by_id):
    all_permission_term = SQL("bool_or(artifact IS NULL) as all")
    some_permission_term = SQL("bool_or(artifact IS NOT NULL) as some")
    constraints = [
        cls.grantee_id.is_in(grantee_ids),
        cls.policy_id.is_in(policies_by_id.keys())]
    result_set = IStore(cls).find(
        (cls.grantee_id, cls.policy_id, all_permission_term,
         some_permission_term),
        *constraints).group_by(cls.grantee_id, cls.policy_id)
    for (person_id, policy_id, has_all, has_some) in result_set:
        person = persons_by_id[person_id]
        policy = policies_by_id[policy_id]
        permissions_cache[person][policy] = (
            SharingPermission.ALL if has_all else SharingPermission.SOME)
        if has_some:
            shared_artifact_info_types[person].append(policy.type)
def authorCount(self):
    """See `IRevisionCollection`."""
    # Revision authors that are linked to Launchpad people are only
    # counted once even if the revision text that they use in the commit
    # is different.
    author = Func(
        'coalesce', RevisionAuthor.personID, SQL(0) - RevisionAuthor.id)
    expressions = [RevisionCache.revision_author == RevisionAuthor.id]
    expressions.extend(self._filter_expressions)
    result_set = self.store.find(author, expressions)
    result_set.config(distinct=True)
    return result_set.count()
def getReviewableTranslationFiles(self, no_older_than=None):
    """See `ITranslationsPerson`."""
    if self.person.is_team:
        # A team as such does not work on translations.  Skip the
        # search for ones the team has worked on.
        return []
    with_statement = self._composePOFileReviewerCTEs(no_older_than)
    return Store.of(self.person).with_(with_statement).using(
        POFile,
        Join(POTemplate, And(
            POTemplate.id == POFile.potemplateID,
            POTemplate.iscurrent == True))).find(
        POFile,
        POFile.id.is_in(SQL('SELECT * FROM recent_pofiles')),
        POFile.unreviewed_count > 0,
        Or(
            SQL('(POTemplate.productseries, POFile.language) IN '
                '(SELECT * FROM translatable_productseries)'),
            SQL('(POTemplate.distroseries, POFile.language) IN '
                '(SELECT * FROM translatable_distroseries)'))).config(
        distinct=True).order_by(POFile.date_changed)
def getAlias(self, aliasid, token, path):
    """Returns a LibraryFileAlias, or raises LookupError.

    A LookupError is raised if no record with the given ID exists or if
    no related LibraryFileContent exists.

    :param aliasid: A `LibraryFileAlias` ID.
    :param token: The token for the file.  If None, no token is present.
        When a token is supplied, it is looked up with path.
    :param path: The path the request is for, unused unless a token is
        supplied; when supplied it must match the token.  The value of
        path is expected to be that from a twisted request.args, e.g.
        /foo/bar.
    """
    restricted = self.restricted
    if token and path:
        # With a token and a path we may be able to serve restricted
        # files on the public port.
        if isinstance(token, Macaroon):
            # Macaroons have enough other constraints that they don't
            # need to be path-specific; it's simpler and faster to just
            # check the alias ID.
            token_ok = threads.blockingCallFromThread(
                default_reactor, self._verifyMacaroon, token, aliasid)
        else:
            # The URL-encoding of the path may have changed somewhere
            # along the line, so reencode it canonically.  LFA.filename
            # can't contain slashes, so they're safe to leave unencoded.
            # And urllib.quote erroneously excludes ~ from its safe set,
            # while RFC 3986 says it should be unescaped and Chromium
            # forcibly decodes it in any URL that it sees.
            #
            # This needs to match url_path_quote.
            normalised_path = urllib.quote(
                urllib.unquote(path), safe='/~+')
            store = session_store()
            token_ok = not store.find(
                TimeLimitedToken,
                SQL("age(created) < interval '1 day'"),
                TimeLimitedToken.token ==
                    hashlib.sha256(token).hexdigest(),
                TimeLimitedToken.path == normalised_path).is_empty()
            store.reset()
        if token_ok:
            restricted = True
        else:
            raise LookupError("Token stale/pruned/path mismatch")
    alias = LibraryFileAlias.selectOne(
        And(LibraryFileAlias.id == aliasid,
            LibraryFileAlias.contentID == LibraryFileContent.q.id,
            LibraryFileAlias.restricted == restricted))
    if alias is None:
        raise LookupError("No file alias with LibraryFileContent")
    return alias
def packagesetsForUploader(self, archive, person):
    """See `IArchivePermissionSet`."""
    store = IStore(ArchivePermission)
    query = '''
        SELECT ap.id
        FROM archivepermission ap, teamparticipation tp
        WHERE ap.person = tp.team AND tp.person = ?
            AND ap.archive = ? AND ap.packageset IS NOT NULL
    '''
    query = SQL(query, (person.id, archive.id))
    return store.find(
        ArchivePermission, ArchivePermission.id.is_in(query))
def test_sql_passed_through(self):
    # create() passes SQL() expressions through untouched.
    bug = self.factory.makeBug()
    person = self.factory.makePerson()
    [sub] = bulk.create(
        (BugSubscription.bug, BugSubscription.person,
         BugSubscription.subscribed_by, BugSubscription.date_created,
         BugSubscription.bug_notification_level),
        [(bug, person, person,
          SQL("CURRENT_TIMESTAMP AT TIME ZONE 'UTC'"),
          BugNotificationLevel.LIFECYCLE)],
        get_objects=True)
    self.assertEqual(get_transaction_timestamp(), sub.date_created)
def get_bugsummary_filter_for_user(user):
    """Build a Storm expression to filter BugSummary by visibility.

    :param user: The user for which visible rows should be calculated.
    :return: (with_clauses, where_clauses)
    """
    # Admins get to see every bug, everyone else only sees bugs
    # viewable by them-or-their-teams.
    # Note that because admins can see every bug regardless of
    # subscription they will see rather inflated counts.  Admins get to
    # deal.
    public_filter = And(
        BugSummary.viewed_by_id == None,
        BugSummary.access_policy_id == None)
    if user is None:
        return [], [public_filter]
    elif IPersonRoles(user).in_admin:
        return [], []
    else:
        with_clauses = [
            With(
                'teams',
                Select(TeamParticipation.teamID,
                       tables=[TeamParticipation],
                       where=(TeamParticipation.personID == user.id))),
            With(
                'policies',
                Select(AccessPolicyGrant.policy_id,
                       tables=[AccessPolicyGrant],
                       where=(AccessPolicyGrant.grantee_id.is_in(
                           SQL("SELECT team FROM teams"))))),
            ]
        where_clauses = [
            Or(
                public_filter,
                BugSummary.viewed_by_id.is_in(
                    SQL("SELECT team FROM teams")),
                BugSummary.access_policy_id.is_in(
                    SQL("SELECT policy FROM policies"))),
            ]
        return with_clauses, where_clauses
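# A hypothetical caller sketch (not from the original module) showing how the
# two lists returned by get_bugsummary_filter_for_user() would typically be
# threaded into a Storm query.  "IStore" is assumed to be the usual store
# accessor; BugSummary is the model class used above.
def count_visible_bugsummary_rows(user):
    with_clauses, where_clauses = get_bugsummary_filter_for_user(user)
    store = IStore(BugSummary)
    if with_clauses:
        store = store.with_(with_clauses)
    return store.find(BugSummary, *where_clauses).count()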
def test_restricted_with_expired_token(self):
    fileAlias, url = self.get_restricted_file_and_public_url()
    # We have the base url for a restricted file; grant access to it
    # for a short time.
    token = TimeLimitedToken.allocate(url)
    # But time has passed.
    store = session_store()
    tokens = store.find(
        TimeLimitedToken,
        TimeLimitedToken.token == hashlib.sha256(token).hexdigest())
    tokens.set(
        TimeLimitedToken.created == SQL("created - interval '1 week'"))
    # Now, as per test_restricted_no_token, we should get a 404.
    self.require404(url, params={"token": token})
def search(self, user, text, limit):
    """See `IPillarSet`."""
    # Avoid circular import.
    from lp.registry.model.product import get_precached_products

    if limit is None:
        limit = config.launchpad.default_batch_size

    # Pull out the licences as a subselect which is converted
    # into a PostgreSQL array so that multiple licences per product
    # can be retrieved in a single row for each product.
    result = self.build_search_query(user, text)

    # If the search text matches the name or title of the
    # Product, Project, or Distribution exactly, then this
    # row should get the highest search rank (9999999).
    # Each row in the PillarName table will join with only one
    # of either the Product, Project, or Distribution tables,
    # so the coalesce() is necessary to find the ts_rank() which
    # is not null.
    result.order_by(SQL('''
        (CASE WHEN PillarName.name = lower(%(text)s)
                  OR lower(Product.title) = lower(%(text)s)
                  OR lower(Project.title) = lower(%(text)s)
                  OR lower(Distribution.title) = lower(%(text)s)
            THEN 9999999
            ELSE coalesce(
                ts_rank(Product.fti, ftq(%(text)s)),
                ts_rank(Project.fti, ftq(%(text)s)),
                ts_rank(Distribution.fti, ftq(%(text)s)))
        END) DESC, PillarName.name
        ''' % sqlvalues(text=text)))

    # People shouldn't be calling this method with too big limits.
    longest_expected = 2 * config.launchpad.default_batch_size
    if limit > longest_expected:
        warnings.warn(
            "The search limit (%s) was greater than the longest expected "
            "size (%s)" % (limit, longest_expected), stacklevel=2)

    pillars = []
    products = []
    for pillar_name, other, product, projectgroup, distro in (
            result[:limit]):
        pillar = pillar_name.pillar
        if IProduct.providedBy(pillar):
            products.append(pillar)
        pillars.append(pillar)
    # Prefill pillar.product.licenses.
    get_precached_products(products, need_licences=True)
    return pillars
def _getSharedPillars(self, person, user, pillar_class, extra_filter=None):
    """Helper method for getSharedProjects and getSharedDistributions.

    pillar_class is either Product or Distribution.  Products define the
    owner foreign key attribute as _owner so we need to account for that,
    but otherwise the logic is the same for both pillar types.
    """
    if user is None:
        return []
    store = IStore(AccessPolicyGrantFlat)
    roles = IPersonRoles(user)
    if roles.in_admin:
        filter = True
    else:
        with_statement = With(
            "teams",
            Select(
                TeamParticipation.teamID,
                tables=TeamParticipation,
                where=TeamParticipation.person == user.id))
        teams_sql = SQL("SELECT team from teams")
        store = store.with_(with_statement)
        if IProduct.implementedBy(pillar_class):
            ownerID = pillar_class._ownerID
        else:
            ownerID = pillar_class.ownerID
        filter = Or(
            extra_filter or False,
            ownerID.is_in(teams_sql),
            pillar_class.driverID.is_in(teams_sql))
    tables = [
        AccessPolicyGrantFlat,
        Join(
            AccessPolicy,
            AccessPolicyGrantFlat.policy_id == AccessPolicy.id)]
    if IProduct.implementedBy(pillar_class):
        access_policy_column = AccessPolicy.product_id
    else:
        access_policy_column = AccessPolicy.distribution_id
    result_set = store.find(
        pillar_class,
        pillar_class.id.is_in(
            Select(
                columns=access_policy_column, tables=tables,
                where=(AccessPolicyGrantFlat.grantee_id == person.id))),
        filter)
    return result_set
def sourcesIncluded(self, direct_inclusion=False):
    """See `IPackageset`."""
    if direct_inclusion == False:
        source_name_query = '''
            SELECT pss.sourcepackagename
            FROM packagesetsources pss, flatpackagesetinclusion fpsi
            WHERE pss.packageset = fpsi.child AND fpsi.parent = ?
        '''
    else:
        source_name_query = '''
            SELECT pss.sourcepackagename FROM packagesetsources pss
            WHERE pss.packageset = ?
        '''
    store = IStore(Packageset)
    source_names = SQL(source_name_query, (self.id,))
    result_set = store.find(
        SourcePackageName, SourcePackageName.id.is_in(source_names))
    return _order_result_set(result_set)
def packagesetsForSourceUploader(self, archive, sourcepackagename, person):
    """See `IArchivePermissionSet`."""
    sourcepackagename = self._nameToSourcePackageName(sourcepackagename)
    store = IStore(ArchivePermission)
    query = '''
        SELECT ap.id
        FROM archivepermission ap, teamparticipation tp,
             packagesetsources pss, flatpackagesetinclusion fpsi
        WHERE ap.person = tp.team AND tp.person = ?
            AND ap.packageset = fpsi.parent
            AND pss.packageset = fpsi.child
            AND pss.sourcepackagename = ?
            AND ap.archive = ?
    '''
    query = SQL(query, (person.id, sourcepackagename.id, archive.id))
    return store.find(
        ArchivePermission, ArchivePermission.id.is_in(query))
def load_teams_and_permissions(grantees):
    # We now have the grantees we want in the result so load any
    # associated team memberships and permissions and cache them.
    if permissions_cache:
        return
    store = IStore(cls)
    for grantee in grantees:
        grantees_by_id[grantee[0].id] = grantee[0]
    # Find any teams associated with the grantees.  If grantees is a
    # sliced list (for batching), it may contain indirect grantees but
    # not the team they belong to so that needs to be fixed below.
    with_expr = With(
        "grantees",
        store.find(
            cls.grantee_id,
            cls.policy_id.is_in(policies_by_id.keys())).config(
                distinct=True)._get_select())
    result_set = store.with_(with_expr).find(
        (TeamParticipation.teamID, TeamParticipation.personID),
        TeamParticipation.personID.is_in(grantees_by_id.keys()),
        TeamParticipation.teamID.is_in(
            Select(
                (SQL("grantees.grantee"), ),
                tables="grantees", distinct=True)))
    team_ids = set()
    direct_grantee_ids = set()
    for team_id, team_member_id in result_set:
        if team_member_id == team_id:
            direct_grantee_ids.add(team_member_id)
        else:
            via_teams_cache[team_member_id].append(team_id)
            team_ids.add(team_id)
    # Remove from the via_teams cache all the direct grantees.
    for direct_grantee_id in direct_grantee_ids:
        if direct_grantee_id in via_teams_cache:
            del via_teams_cache[direct_grantee_id]
    # Load and cache the additional required teams.
    persons = store.find(Person, Person.id.is_in(team_ids))
    for person in persons:
        grantees_by_id[person.id] = person

    cls._populatePermissionsCache(
        permissions_cache, shared_artifact_info_types,
        grantees_by_id.keys(), policies_by_id, grantees_by_id)
def _getDirectAndDuplicateSubscriptions(self, person, bug):
    # Fetch all information for direct and duplicate
    # subscriptions (including indirect through team
    # membership) in a single query.
    with_statement = generate_subscription_with(bug, person)
    info = Store.of(person).with_(with_statement).find(
        (BugSubscription, Bug, Person),
        BugSubscription.id.is_in(
            SQL('SELECT bugsubscriptions.id FROM bugsubscriptions')),
        Person.id == BugSubscription.person_id,
        Bug.id == BugSubscription.bug_id)

    direct = RealSubscriptionInfoCollection(
        self.person, self.administrated_team_ids)
    duplicates = RealSubscriptionInfoCollection(
        self.person, self.administrated_team_ids)
    bugs = set()
    for subscription, subscribed_bug, subscriber in info:
        bugs.add(subscribed_bug)
        if subscribed_bug.id != bug.id:
            # This is a subscription through a duplicate.
            collection = duplicates
        else:
            # This is a direct subscription.
            collection = direct
        collection.add(subscriber, subscribed_bug, subscription)
    # Preload bug owners, then all pillars.
    list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
        [bug.ownerID for bug in bugs]))
    all_tasks = [task for bug in bugs for task in bug.bugtasks]
    load_related(Product, all_tasks, ['productID'])
    load_related(Distribution, all_tasks, ['distributionID'])
    for bug in bugs:
        # Indicate the reporter and bug_supervisor.
        duplicates.annotateReporter(bug, bug.owner)
        direct.annotateReporter(bug, bug.owner)
    for task in all_tasks:
        # Get bug_supervisor.
        duplicates.annotateBugTaskResponsibilities(
            task, task.pillar, task.pillar.bug_supervisor)
        direct.annotateBugTaskResponsibilities(
            task, task.pillar, task.pillar.bug_supervisor)
    return (direct, duplicates)
def uploadersForPackageset(self, archive, packageset,
                           direct_permissions=True):
    """See `IArchivePermissionSet`."""
    store = IStore(ArchivePermission)
    if direct_permissions == True:
        query = '''
            SELECT ap.id FROM archivepermission ap
            WHERE ap.packageset = ?
        '''
    else:
        query = '''
            SELECT ap.id
            FROM archivepermission ap, flatpackagesetinclusion fpsi
            WHERE fpsi.child = ? AND ap.packageset = fpsi.parent
        '''
    query += " AND ap.archive = ?"
    query = SQL(query, (packageset.id, archive.id))
    return store.find(
        ArchivePermission, ArchivePermission.id.is_in(query))
def setsIncluded(self, direct_inclusion=False):
    """See `IPackageset`."""
    if direct_inclusion == False:
        # The very last clause in the query is necessary because each
        # package set is also a successor of itself in the flattened
        # hierarchy.
        query = '''
            SELECT fpsi.child
            FROM flatpackagesetinclusion fpsi
            WHERE fpsi.parent = ? AND fpsi.child != ?
        '''
        params = (self.id, self.id)
    else:
        query = '''
            SELECT psi.child FROM packagesetinclusion psi
            WHERE psi.parent = ?
        '''
        params = (self.id,)
    store = IStore(Packageset)
    successors = SQL(query, params)
    result_set = store.find(Packageset, Packageset.id.is_in(successors))
    return _order_result_set(result_set)
def build_search_query(self, user, text):
    """Query parameters shared by search() and count_search_matches().

    :returns: Storm ResultSet object
    """
    # These classes are imported in this method to prevent an import loop.
    from lp.registry.model.product import Product, ProductSet
    from lp.registry.model.projectgroup import ProjectGroup
    from lp.registry.model.distribution import Distribution

    OtherPillarName = ClassAlias(PillarName)
    origin = [
        PillarName,
        LeftJoin(
            OtherPillarName, PillarName.alias_for == OtherPillarName.id),
        LeftJoin(Product, PillarName.product == Product.id),
        LeftJoin(
            ProjectGroup, PillarName.projectgroup == ProjectGroup.id),
        LeftJoin(
            Distribution, PillarName.distribution == Distribution.id),
        ]
    conditions = SQL('''
        PillarName.active = TRUE
        AND (PillarName.name = lower(%(text)s)
             OR Product.fti @@ ftq(%(text)s)
             OR lower(Product.title) = lower(%(text)s)
             OR Project.fti @@ ftq(%(text)s)
             OR lower(Project.title) = lower(%(text)s)
             OR Distribution.fti @@ ftq(%(text)s)
             OR lower(Distribution.title) = lower(%(text)s)
            )
        ''' % sqlvalues(text=ensure_unicode(text)))
    columns = [
        PillarName, OtherPillarName, Product, ProjectGroup, Distribution]
    return IStore(PillarName).using(*origin).find(
        tuple(columns),
        And(conditions, ProductSet.getProductPrivacyFilter(user)))
def _prepare_result_set(self):
    store = self._cls._get_store()

    args = []
    if self._clause:
        args.append(self._clause)

    for key, value in self._by.items():
        args.append(getattr(self._cls, key) == value)

    tables = []
    if self._clauseTables is not None:
        tables.extend(self._clauseTables)

    if not (self._prejoins or self._prejoinClauseTables):
        find_spec = self._cls
    else:
        find_spec = [self._cls]

        if self._prejoins:
            already_prejoined = {}
            last_prejoin = 0
            join = self._cls
            for prejoin_path in self._prejoins:
                local_cls = self._cls
                path = ()
                for prejoin_attr in prejoin_path.split("."):
                    path += (prejoin_attr, )
                    # If we've already prejoined this column, we're done.
                    if path in already_prejoined:
                        local_cls = already_prejoined[path]
                        continue
                    # Otherwise, join the table.
                    relation = getattr(local_cls, prejoin_attr)._relation
                    last_prejoin += 1
                    remote_cls = ClassAlias(
                        relation.remote_cls, '_prejoin%d' % last_prejoin)
                    join_expr = join_aliased_relation(
                        local_cls, remote_cls, relation)
                    join = LeftJoin(join, remote_cls, join_expr)
                    find_spec.append(remote_cls)
                    already_prejoined[path] = remote_cls
                    local_cls = remote_cls
            if join is not self._cls:
                tables.append(join)

        if self._prejoinClauseTables:
            property_registry = self._cls._storm_property_registry
            for table in self._prejoinClauseTables:
                cls = property_registry.get("<table %s>" % table).cls
                find_spec.append(cls)

        find_spec = tuple(find_spec)

    if tables:
        # If we are adding extra tables, make sure the main table
        # is included.
        tables.insert(0, self._cls.__storm_table__)
        # Inject an AutoTables expression with a dummy true value to
        # be ANDed in the WHERE clause, so that we can introduce our
        # tables into the dynamic table handling of Storm without
        # disrupting anything else.
        args.append(AutoTables(SQL("1=1"), tables))

    if self._selectAlso is not None:
        if type(find_spec) is not tuple:
            find_spec = (find_spec, SQL(self._selectAlso))
        else:
            find_spec += (SQL(self._selectAlso), )

    return store.find(find_spec, *args)
def rank_by_fti(table, text, ftq=True):
    """An expression for ordering by full-text match rank (best first)."""
    table, query_fragment = determine_table_and_fragment(table, ftq)
    return SQL(
        '-rank(%s.fti, %s)' % (table.name, query_fragment),
        params=(text,), tables=(table,))
# Copyright 2009 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Database constants."""

from storm.expr import SQL

UTC_NOW = SQL("CURRENT_TIMESTAMP AT TIME ZONE 'UTC'")

DEFAULT = SQL("DEFAULT")

# We can't use infinity, as psycopg doesn't know how to handle it.  And
# neither does Python, I guess.
#NEVER_EXPIRES = SQL("'infinity'::TIMESTAMP")
NEVER_EXPIRES = SQL("'3000-01-01'::TIMESTAMP WITHOUT TIME ZONE")

THIRTY_DAYS_AGO = SQL(
    "CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval '30 days'")

SEVEN_DAYS_AGO = SQL(
    "CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval '7 days'")

ONE_DAY_AGO = SQL("CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval '1 day'")
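# A minimal usage sketch (not part of the constants module above): because
# these constants are ordinary Storm SQL() expressions, they can be used
# wherever Storm accepts an expression, e.g. as values in ResultSet.set() or
# in comparisons, much like resetWatches() above.  "ExampleJob" and its
# columns are hypothetical names used purely for illustration.
def reschedule_stale_jobs(store, ExampleJob):
    """Push jobs untouched for a week back to the current time (sketch)."""
    store.find(
        ExampleJob,
        ExampleJob.date_last_run < SEVEN_DAYS_AGO).set(
        scheduled_start=UTC_NOW)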
def __init__(self, table, column):
    # Constructor of an SQL() subclass: render a plain "table.column"
    # reference as the raw SQL expression.
    SQL.__init__(self, '%s.%s' % (table, column))
def fti_search(table, text, ftq=True):
    """An expression ensuring that table rows match the specified text."""
    table, query_fragment = determine_table_and_fragment(table, ftq)
    return SQL(
        '%s.fti @@ %s' % (table.name, query_fragment),
        params=(text,), tables=(table,))
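# A hypothetical sketch combining the two fti helpers above: fti_search()
# filters rows by full-text match and rank_by_fti() puts the best matches
# first.  "Product" and "IStore" stand in for whatever model class and store
# accessor the caller already has.
def search_products_by_text(text):
    return IStore(Product).find(
        Product, fti_search(Product, text)).order_by(
        rank_by_fti(Product, text))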
def sum(self, attribute):
    if isinstance(attribute, basestring):
        attribute = SQL(attribute)
    result_set = self._without_prejoins()._result_set
    return result_set.sum(attribute)
def _composePOFileReviewerCTEs(self, no_older_than):
    """Compose Storm CTEs for common `POFile` queries.

    Returns a list of Storm CTEs, much the same as
    _composePOFileReviewerJoins.
    """
    clause = [
        POFileTranslator.personID == self.person.id,
        POFile.language != getUtility(ILaunchpadCelebrities).english]
    if no_older_than:
        clause.append(POFileTranslator.date_last_touched >= no_older_than)
    RecentPOFiles = With("recent_pofiles", Select(
        (POFile.id,),
        tables=[
            POFileTranslator,
            Join(POFile, POFileTranslator.pofile == POFile.id)],
        where=And(*clause)))
    ReviewableGroups = With("reviewable_groups", Select(
        (TranslationGroup.id, Translator.languageID),
        tables=[
            TranslationGroup,
            Join(
                Translator,
                Translator.translationgroupID == TranslationGroup.id),
            Join(
                TeamParticipation,
                And(
                    TeamParticipation.teamID == Translator.translatorID,
                    TeamParticipation.personID == self.person.id))]))
    TranslatableDistroSeries = With("translatable_distroseries", Select(
        (DistroSeries.id, SQL('reviewable_groups.language')),
        tables=[
            DistroSeries,
            Join(
                Distribution,
                And(
                    Distribution.id == DistroSeries.distributionID,
                    Distribution.translations_usage ==
                        ServiceUsage.LAUNCHPAD,
                    Distribution.translation_focusID == DistroSeries.id)),
            Join(
                SQL('reviewable_groups'),
                SQL('reviewable_groups.id') ==
                    Distribution.translationgroupID)]))
    TranslatableProductSeries = With("translatable_productseries", Select(
        (ProductSeries.id, SQL('reviewable_groups.language')),
        tables=[
            ProductSeries,
            Join(
                Product,
                And(
                    Product.id == ProductSeries.productID,
                    Product.translations_usage == ServiceUsage.LAUNCHPAD,
                    Product.active == True)),
            LeftJoin(ProjectGroup, ProjectGroup.id == Product.projectID),
            Join(
                SQL('reviewable_groups'),
                SQL('reviewable_groups.id') ==
                    Product.translationgroupID)]))
    return [
        RecentPOFiles,
        ReviewableGroups,
        TranslatableDistroSeries,
        TranslatableProductSeries,
        ]