Пример #1
0
def _ProcessStatusSD(fmt, harmonized_statuses):
    """Convert a 'status' sort directive into SQL.

    Args:
        fmt: Callable that formats an SQL snippet, filling in {rev_sort_dir}
            and any keyword placeholders passed to it.
        harmonized_statuses: Iterable of (status_id, rank, status) tuples;
            a None rank marks an oddball (non-well-known) status.

    Returns:
        (left_joins, order_by) pair of lists of (sql_str, args) tuples.
    """
    # Note: status_def_rows are already ordered by REVERSED rank.
    ranked_ids = []
    oddball_ids = []
    for stat_id, rank, _ in harmonized_statuses:
        if rank is None:
            oddball_ids.append(stat_id)
        else:
            ranked_ids.append(stat_id)

    # Sort on the explicit status if set, otherwise the derived status.
    sort_col = ('IF(ISNULL(Issue.status_id), Issue.derived_status_id, '
                'Issue.status_id)')

    order_by = []  # Both well-known and oddball clauses may be appended.
    # Reverse sort by using rev_sort_dir because we want NULLs at the end.
    if ranked_ids:
        order_by.append(
            (fmt('FIELD({sort_col}, {wk_status_ph}) {rev_sort_dir}',
                 sort_col=sort_col,
                 wk_status_ph=sql.PlaceHolders(ranked_ids)), ranked_ids))
    # Even though oddball statuses sort lexicographically, use FIELD to
    # determine the order so that the database sorts ints rather than
    # strings for speed.
    if oddball_ids:
        order_by.append(
            (fmt('FIELD({sort_col}, {odd_status_ph}) {rev_sort_dir}',
                 sort_col=sort_col,
                 odd_status_ph=sql.PlaceHolders(oddball_ids)), oddball_ids))

    return [], order_by
Пример #2
0
    def RecordManualIssueVerdicts(self, cnxn, issue_service, issues, user_id,
                                  is_spam):
        """Record a manual spam verdict by user_id on each of the issues."""
        issue_ids = [issue.issue_id for issue in issues]
        verdict_rows = [
            (user_id, issue.issue_id, is_spam, REASON_MANUAL, issue.project_id)
            for issue in issues
        ]

        # Overrule all previous verdicts.
        self.verdict_tbl.Update(
            cnxn, {'overruled': True},
            [('issue_id IN (%s)' % sql.PlaceHolders(issue_ids), issue_ids)],
            commit=False)

        self.verdict_tbl.InsertRows(
            cnxn, MANUALVERDICT_ISSUE_COLS, verdict_rows, ignore=True)

        # Reflect the verdict on the in-memory issue objects too.
        for issue in issues:
            issue.is_spam = is_spam

        if is_spam:
            self.issue_actions.increment_by(len(issues), {'type': 'manual'})
        else:
            # NOTE(review): presumably un-marking spam requires fresh local
            # IDs so the issues become visible again — confirm against
            # issue_service.AllocateNewLocalIDs.
            issue_service.AllocateNewLocalIDs(cnxn, issues)

        # This will commit the transaction.
        issue_service.UpdateIssues(cnxn, issues, update_cols=['is_spam'])
Пример #3
0
def _CustomFieldSortClauses(fd_list, value_type, value_column, alias,
                            sort_dir):
    """Give LEFT JOIN and ORDER BY terms for custom fields of the given type.

    Args:
        fd_list: List of field definition objects to consider.
        value_type: The tracker_pb2.FieldTypes value to filter fd_list by.
        value_column: Name of the Issue2FieldValue column holding the value.
        alias: Base SQL alias; the value_column is appended to make it unique.
        sort_dir: 'ASC' or 'DESC' sort direction string.

    Returns:
        (left_joins, order_by) pair of lists of (sql_str, args) tuples;
        both empty when no field matches value_type.
    """
    matching_fds = [fd for fd in fd_list if fd.field_type == value_type]
    if not matching_fds:
        return [], []

    field_ids_ph = sql.PlaceHolders(matching_fds)

    def Fmt(sql_str):
        # Bind the shared format params once so each clause below stays short.
        return sql_str.format(value_column=value_column,
                              sort_dir=sort_dir,
                              field_ids_ph=field_ids_ph,
                              alias='%s_%s' % (alias, value_column))

    field_id_args = [fd.field_id for fd in matching_fds]
    left_joins = [
        (Fmt('Issue2FieldValue AS {alias} ON Issue.id = {alias}.issue_id '
             'AND {alias}.field_id IN ({field_ids_ph})'), field_id_args)
    ]

    if value_type == tracker_pb2.FieldTypes.USER_TYPE:
        # User-valued fields sort by email, which needs a join to User.
        left_joins.append(
            (Fmt('User AS {alias}_user ON {alias}.user_id = '
                 '{alias}_user.user_id'), []))
        order_by = [(Fmt('ISNULL({alias}_user.email) {sort_dir}'), []),
                    (Fmt('{alias}_user.email {sort_dir}'), [])]
    else:
        # Unfortunately, this sorts on the best field value, not all of them.
        order_by = [(Fmt('ISNULL({alias}.{value_column}) {sort_dir}'), []),
                    (Fmt('{alias}.{value_column} {sort_dir}'), [])]

    return left_joins, order_by
Пример #4
0
def _LabelSortClauses(sd, harmonized_labels, fmt):
    """Give LEFT JOIN and ORDER BY terms for label sort directives.

    Args:
        sd: Sort directive string; only labels prefixed '<sd>-' are used.
        harmonized_labels: Iterable of (label_id, rank, label) tuples;
            a None rank marks an oddball (non-well-known) label.
        fmt: Callable that formats an SQL snippet, filling in {alias},
            {rev_sort_dir}, and any keyword placeholders passed to it.

    Returns:
        (left_joins, order_by) pair of lists of (sql_str, args) tuples.
    """
    # Note: derived labels should work automatically.
    prefix = '%s-' % sd

    # label_def_rows are already ordered by REVERSED rank.
    ranked_ids = []
    oddball_ids = []
    for label_id, rank, label in harmonized_labels:
        if not label.lower().startswith(prefix):
            continue
        if rank is None:
            oddball_ids.append(label_id)
        else:
            ranked_ids.append(label_id)
    all_ids = ranked_ids + oddball_ids

    left_joins = []
    if all_ids:
        left_joins = [(fmt(
            'Issue2Label AS {alias} ON Issue.id = {alias}.issue_id '
            'AND {alias}.label_id IN ({all_label_ph})',
            all_label_ph=sql.PlaceHolders(all_ids)), all_ids)]

    order_by = []
    # Reverse sort by using rev_sort_dir because we want NULLs at the end.
    if ranked_ids:
        order_by.append(
            (fmt('FIELD({alias}.label_id, {wk_label_ph}) {rev_sort_dir}',
                 wk_label_ph=sql.PlaceHolders(ranked_ids)), ranked_ids))
    if oddball_ids:
        # Even though oddball labels sort lexicographically, use FIELD to
        # determine the order so that the database sorts ints rather than
        # strings for speed.
        order_by.append(
            (fmt('FIELD({alias}.label_id, {odd_label_ph}) {rev_sort_dir}',
                 odd_label_ph=sql.PlaceHolders(oddball_ids)), oddball_ids))

    return left_joins, order_by
Пример #5
0
def _Compare(alias, op, val_type, col, vals):
  """Return an SQL comparison for the given values. For use in WHERE or ON.

  Args:
    alias: String name of the table or alias defined in a JOIN clause.
    op: One of the operators defined in ast_pb2.py.
    val_type: One of the value types defined in ast_pb2.py.
    col: string column name to compare to vals.
    vals: list of values that the user is searching for.

  Returns:
    (cond_str, cond_args) where cond_str is a SQL condition that may contain
    some %s placeholders, and cond_args is the list of values that fill those
    placeholders.  If the condition string contains any AND or OR operators,
    the whole expression is put inside parens.

  Raises:
    NoPossibleResults: The user's query is impossible to ever satisfy, e.g.,
        it requires matching an empty set of labels.
  """
  vals_ph = sql.PlaceHolders(vals)
  # Text-ish columns are compared case-insensitively.
  if col in ('label', 'status', 'email', 'name'):
    alias_col = 'LOWER(%s.%s)' % (alias, col)
  else:
    alias_col = '%s.%s' % (alias, col)

  def Fmt(cond_str):
    return cond_str.format(alias_col=alias_col, vals_ph=vals_ph)

  numeric = val_type in (tracker_pb2.FieldTypes.DATE_TYPE,
                         tracker_pb2.FieldTypes.INT_TYPE)
  # For numeric columns "no value" is 0; for text columns it is ''.
  no_value = 0 if numeric else ''

  if op == ast_pb2.QueryOp.IS_DEFINED:
    return Fmt('({alias_col} IS NOT NULL AND {alias_col} != %s)'), [no_value]
  if op == ast_pb2.QueryOp.IS_NOT_DEFINED:
    return Fmt('({alias_col} IS NULL OR {alias_col} = %s)'), [no_value]

  # Substring matching makes no sense on dates and ints; fall back to
  # (in)equality for those types.
  if numeric:
    if op == ast_pb2.QueryOp.TEXT_HAS:
      op = ast_pb2.QueryOp.EQ
    elif op == ast_pb2.QueryOp.NOT_TEXT_HAS:
      op = ast_pb2.QueryOp.NE

  if op == ast_pb2.QueryOp.EQ:
    if not vals:
      raise NoPossibleResults('Column %s has no possible value' % alias_col)
    if len(vals) == 1:
      return Fmt('{alias_col} = %s'), vals
    return Fmt('{alias_col} IN ({vals_ph})'), vals

  if op == ast_pb2.QueryOp.NE:
    if not vals:
      return 'TRUE', []  # a no-op that matches every row.
    if len(vals) == 1:
      comp = Fmt('{alias_col} != %s')
    else:
      comp = Fmt('{alias_col} NOT IN ({vals_ph})')
    # NULL never compares unequal in SQL, so include it explicitly.
    return '(%s IS NULL OR %s)' % (alias_col, comp), vals

  wild_vals = ['%%%s%%' % val for val in vals]
  if op == ast_pb2.QueryOp.TEXT_HAS:
    cond_str = ' OR '.join(Fmt('{alias_col} LIKE %s') for _ in vals)
    return '(%s)' % cond_str, wild_vals
  if op == ast_pb2.QueryOp.NOT_TEXT_HAS:
    cond_str = (Fmt('{alias_col} IS NULL OR ') +
                ' AND '.join(Fmt('{alias_col} NOT LIKE %s') for _ in vals))
    return '(%s)' % cond_str, wild_vals

  # Note: These operators do not support quick-OR; only the first value
  # is considered.
  val = vals[0]
  inequality_templates = {
      ast_pb2.QueryOp.GT: '{alias_col} > %s',
      ast_pb2.QueryOp.LT: '{alias_col} < %s',
      ast_pb2.QueryOp.GE: '{alias_col} >= %s',
      ast_pb2.QueryOp.LE: '{alias_col} <= %s',
  }
  if op in inequality_templates:
    return Fmt(inequality_templates[op]), [val]

  # Unknown op: log it and (as before) implicitly return None.
  logging.error('unknown op: %r', op)
Пример #6
0
def GatherUpdatesData(services,
                      mr,
                      prof,
                      project_ids=None,
                      user_ids=None,
                      ending=None,
                      updates_page_url=None,
                      autolink=None,
                      highlight=None):
    """Gathers and returns updates data.

  Args:
    services: Connections to backend services.
    mr: HTTP request info, used by the artifact autolink.
    prof: The profiler to use.
    project_ids: List of project IDs we want updates for.
    user_ids: List of user IDs we want updates for.
    ending: Ending type for activity titles, 'in_project' or 'by_user'.
    updates_page_url: The URL that will be used to create pagination links from.
    autolink: Autolink instance.
    highlight: What to highlight in the middle column on user updates pages
        i.e. 'project', 'user', or None.

  Returns:
    A dict with keys 'no_stars', 'no_activities', 'pagination',
    'updates_data', and 'ending_type', ready for template rendering.
  """
    # Paging forward past an "after" timestamp: query ascending, then
    # reverse the results below so display order stays newest-first.
    ascending = bool(mr.after)

    # num should be non-negative number
    num = mr.GetPositiveIntParam('num', UPDATES_PER_PAGE)
    num = min(num, MAX_UPDATES_PER_PAGE)

    updates_data = {
        'no_stars': None,
        'no_activities': None,
        'pagination': None,
        'updates_data': None,
        'ending_type': ending,
    }

    # Nothing to look up: return early with the "no stars" flag set.
    if not user_ids and not project_ids:
        updates_data['no_stars'] = ezt.boolean(True)
        return updates_data

    with prof.Phase('get activities'):
        # TODO(jrobbins): make this into a persist method.
        # TODO(jrobbins): this really needs permission checking in SQL, which will
        # be slow.
        where_conds = [('Issue.id = Comment.issue_id', [])]
        if project_ids is not None:
            cond_str = 'Comment.project_id IN (%s)' % sql.PlaceHolders(
                project_ids)
            where_conds.append((cond_str, project_ids))
        if user_ids is not None:
            cond_str = 'Comment.commenter_id IN (%s)' % sql.PlaceHolders(
                user_ids)
            where_conds.append((cond_str, user_ids))

        # Hint the DB to use the index matching whichever filter is in play.
        if project_ids:
            use_clause = 'USE INDEX (project_id) USE INDEX FOR ORDER BY (project_id)'
        elif user_ids:
            use_clause = (
                'USE INDEX (commenter_id) USE INDEX FOR ORDER BY (commenter_id)'
            )
        else:
            use_clause = ''

        if mr.before:
            where_conds.append(('created < %s', [mr.before]))
        if mr.after:
            where_conds.append(('created > %s', [mr.after]))
        if ascending:
            order_by = [('created', [])]
        else:
            order_by = [('created DESC', [])]

        # limit=num + 1 fetches one extra row, presumably to detect whether a
        # following page exists (see the next_url check below).
        comments = services.issue.GetComments(mr.cnxn,
                                              joins=[('Issue', [])],
                                              deleted_by=None,
                                              where=where_conds,
                                              use_clause=use_clause,
                                              order_by=order_by,
                                              limit=num + 1)

        # TODO(jrobbins): it would be better if we could just get the dict directly.
        prefetched_issues_list = services.issue.GetIssues(
            mr.cnxn, {c.issue_id
                      for c in comments})
        prefetched_issues = {
            issue.issue_id: issue
            for issue in prefetched_issues_list
        }
        needed_project_ids = {
            issue.project_id
            for issue in prefetched_issues_list
        }
        prefetched_projects = services.project.GetProjects(
            mr.cnxn, needed_project_ids)
        prefetched_configs = services.config.GetProjectConfigs(
            mr.cnxn, needed_project_ids)
        viewable_issues_list = tracker_helpers.FilterOutNonViewableIssues(
            mr.auth.effective_ids, mr.auth.user_pb, prefetched_projects,
            prefetched_configs, prefetched_issues_list)
        viewable_iids = {issue.issue_id for issue in viewable_issues_list}

        # Filter the comments based on permission to view the issue.
        # TODO(jrobbins): push permission checking in the query so that pagination
        # pages never become underfilled, or use backends to shard.
        # TODO(jrobbins): come back to this when I implement private comments.
        comments = [c for c in comments if c.issue_id in viewable_iids]

        # Restore newest-first display order after an ascending query.
        if ascending:
            comments.reverse()

    # Collect every user mentioned in amendments so their views can be built.
    amendment_user_ids = []
    for comment in comments:
        for amendment in comment.amendments:
            amendment_user_ids.extend(amendment.added_user_ids)
            amendment_user_ids.extend(amendment.removed_user_ids)

    users_by_id = framework_views.MakeAllUserViews(
        mr.cnxn, services.user, [c.user_id for c in comments],
        amendment_user_ids)
    framework_views.RevealAllEmailsToMembers(mr, users_by_id)

    num_results_returned = len(comments)
    # NOTE(review): this slices by UPDATES_PER_PAGE rather than num, so a
    # larger 'num' request never shows more rows — confirm that is intended.
    displayed_activities = comments[:UPDATES_PER_PAGE]

    if not num_results_returned:
        updates_data['no_activities'] = ezt.boolean(True)
        return updates_data

    # Get all referenced artifacts first
    all_ref_artifacts = None
    if autolink is not None:
        content_list = []
        for activity in comments:
            content_list.append(activity.content)

        all_ref_artifacts = autolink.GetAllReferencedArtifacts(
            mr, content_list)

    # Now process content and gather activities
    today = []
    yesterday = []
    pastweek = []
    pastmonth = []
    thisyear = []
    older = []

    with prof.Phase('rendering activities'):
        # Bucket each rendered activity by its age for display grouping.
        for activity in displayed_activities:
            entry = ActivityView(activity,
                                 services,
                                 mr,
                                 prefetched_issues,
                                 users_by_id,
                                 autolink=autolink,
                                 all_ref_artifacts=all_ref_artifacts,
                                 ending=ending,
                                 highlight=highlight)

            if entry.date_bucket == 'Today':
                today.append(entry)
            elif entry.date_bucket == 'Yesterday':
                yesterday.append(entry)
            elif entry.date_bucket == 'Last 7 days':
                pastweek.append(entry)
            elif entry.date_bucket == 'Last 30 days':
                pastmonth.append(entry)
            elif entry.date_bucket == 'Earlier this year':
                thisyear.append(entry)
            elif entry.date_bucket == 'Before this year':
                older.append(entry)

    # Timestamps bounding the displayed window, used for prev/next links.
    new_after = None
    new_before = None
    if displayed_activities:
        new_after = displayed_activities[0].timestamp
        new_before = displayed_activities[-1].timestamp

    prev_url = None
    next_url = None
    if updates_page_url:
        list_servlet_rel_url = updates_page_url.split('/')[-1]
        if displayed_activities and (mr.before or mr.after):
            prev_url = framework_helpers.FormatURL(mr,
                                                   list_servlet_rel_url,
                                                   after=new_after)
        if mr.after or len(comments) > UPDATES_PER_PAGE:
            next_url = framework_helpers.FormatURL(mr,
                                                   list_servlet_rel_url,
                                                   before=new_before)

    if prev_url or next_url:
        pagination = template_helpers.EZTItem(start=None,
                                              last=None,
                                              prev_url=prev_url,
                                              next_url=next_url,
                                              reload_url=None,
                                              visible=ezt.boolean(True),
                                              total_count=None)
    else:
        pagination = None

    updates_data.update({
        'no_activities':
        ezt.boolean(False),
        'pagination':
        pagination,
        'updates_data':
        template_helpers.EZTItem(today=today,
                                 yesterday=yesterday,
                                 pastweek=pastweek,
                                 pastmonth=pastmonth,
                                 thisyear=thisyear,
                                 older=older),
    })

    return updates_data
Пример #7
0
    def QueryIssueSnapshots(self,
                            cnxn,
                            services,
                            unixtime,
                            effective_ids,
                            project,
                            perms,
                            group_by=None,
                            label_prefix=None,
                            query=None,
                            canned_query=None):
        """Queries historical issue counts grouped by label or component.

    Args:
      cnxn: A MonorailConnection instance.
      services: A Services instance.
      unixtime: An integer representing the Unix time in seconds.
      effective_ids: The effective User IDs associated with the current user.
      project: A project object representing the current project.
      perms: A permissions object associated with the current user.
      group_by (str, optional): Which dimension to group by. Values can
        be 'label', 'component', or None, in which case no grouping will
        be applied.
      label_prefix: Required when group_by is 'label.' Will limit the query to
        only labels with the specified prefix (for example 'Pri').
      query (str, optional): A query string from the request to apply to
        the snapshot query.
      canned_query (str, optional): Parsed canned query applied to the query
        scope.

    Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
      2. A list of any unsupported query conditions in query.
      3. A boolean that is true if any results were capped.

    Raises:
      ValueError: group_by is unrecognized, or 'label' grouping was requested
        without a label_prefix.
    """
        project_config = services.config.GetProjectConfig(
            cnxn, project.project_id)
        try:
            query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
                cnxn, services, project_config, query, canned_query, project)
        except ast2select.NoPossibleResults:
            # The query can never match anything; report it as invalid.
            return {}, ['Invalid query.'], False

        # Label IDs that mark issues this user may not view unless involved.
        restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
            cnxn, None, self.config_service, effective_ids, project, perms)

        left_joins = [
            ('Issue ON IssueSnapshot.issue_id = Issue.id', []),
        ]

        # Join any at-risk label rows so the WHERE clause below can exclude
        # issues carrying one (Forbidden_label.label_id IS NULL check).
        if restricted_label_ids:
            left_joins.append((('Issue2Label AS Forbidden_label'
                                ' ON Issue.id = Forbidden_label.issue_id'
                                ' AND Forbidden_label.label_id IN (%s)' %
                                (sql.PlaceHolders(restricted_label_ids))),
                               restricted_label_ids))

        # Join Cc rows for this user so involvement can be tested below.
        if effective_ids:
            left_joins.append(
                ('Issue2Cc AS I2cc'
                 ' ON Issue.id = I2cc.issue_id'
                 ' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
                 effective_ids))

        # TODO(jeffcarp): Handle case where there are issues with no labels.
        where = [
            ('IssueSnapshot.period_start <= %s', [unixtime]),
            ('IssueSnapshot.period_end > %s', [unixtime]),
            ('IssueSnapshot.project_id = %s', [project.project_id]),
            ('Issue.is_spam = %s', [False]),
            ('Issue.deleted = %s', [False]),
        ]

        # Build the restriction clause.  Three cases:
        #   - anonymous user + restricted labels: require no forbidden label;
        #   - signed-in user + restricted labels: allow involvement
        #     (reporter/owner/cc) OR no forbidden label;
        #   - signed-in user, no restricted labels: involvement check only.
        forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
        if effective_ids:
            if restricted_label_ids:
                forbidden_label_clause = ' OR %s' % forbidden_label_clause
            else:
                forbidden_label_clause = ''

            where.append(
                (('(Issue.reporter_id IN (%s)'
                  ' OR Issue.owner_id IN (%s)'
                  ' OR I2cc.cc_id IS NOT NULL'
                  '%s)') %
                 (sql.PlaceHolders(effective_ids),
                  sql.PlaceHolders(effective_ids), forbidden_label_clause),
                 # effective_ids fills both the reporter and owner IN lists.
                 list(effective_ids) + list(effective_ids)))
        else:
            where.append((forbidden_label_clause, []))

        # Pick the grouping columns and any extra joins per dimension.
        if group_by == 'component':
            cols = ['Comp.path', 'IssueSnapshot.issue_id']
            left_joins.extend([
                (('IssueSnapshot2Component AS Is2c ON'
                  ' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
                ('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
            ])
            group_by = ['Comp.path']
        elif group_by == 'label':
            cols = ['Lab.label', 'IssueSnapshot.issue_id']
            left_joins.extend([
                (('IssueSnapshot2Label AS Is2l'
                  ' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
                ('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
            ])

            if not label_prefix:
                raise ValueError(
                    '`label_prefix` required when grouping by label.')

            # TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
            # ensure regex is case-insensitive.
            where.append(
                ('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
            group_by = ['Lab.label']
        elif group_by == 'open':
            cols = ['IssueSnapshot.is_open', 'IssueSnapshot.issue_id']
            group_by = ['IssueSnapshot.is_open']
        elif group_by == 'status':
            left_joins.append(('StatusDef AS Stats ON ' \
              'Stats.id = IssueSnapshot.status_id', []))
            cols = ['Stats.status', 'IssueSnapshot.issue_id']
            group_by = ['Stats.status']
        elif group_by == 'owner':
            cols = ['IssueSnapshot.owner_id', 'IssueSnapshot.issue_id']
            group_by = ['IssueSnapshot.owner_id']
        elif not group_by:
            cols = ['IssueSnapshot.issue_id']
        else:
            raise ValueError('`group_by` must be label, component, ' \
              'open, status, owner or None.')

        if query_left_joins:
            left_joins.extend(query_left_joins)

        if query_where:
            where.extend(query_where)

        # Fan the query out across all logical shards and run them in
        # parallel via Promises.
        promises = []

        for shard_id in range(settings.num_logical_shards):
            count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
                                                             where=where,
                                                             joins=left_joins,
                                                             group_by=group_by,
                                                             shard_id=shard_id)
            promises.append(
                framework_helpers.Promise(cnxn.Execute,
                                          count_stmt,
                                          stmt_args,
                                          shard_id=shard_id))

        shard_values_dict = {}

        search_limit_reached = False

        # Merge per-shard counts, flagging any shard that hit the row cap.
        for promise in promises:
            # Wait for each query to complete and add it to the dict.
            shard_values = list(promise.WaitAndGetValue())

            if not shard_values:
                continue
            if group_by:
                for name, count in shard_values:
                    if count >= settings.chart_query_max_rows:
                        search_limit_reached = True

                    shard_values_dict.setdefault(name, 0)
                    shard_values_dict[name] += count
            else:
                if shard_values[0][0] >= settings.chart_query_max_rows:
                    search_limit_reached = True

                shard_values_dict.setdefault('total', 0)
                shard_values_dict['total'] += shard_values[0][0]

        # De-duplicate the field names from any unsupported conditions.
        unsupported_field_names = list(
            set([
                field.field_name for cond in unsupported_conds
                for field in cond.field_defs
            ]))

        return shard_values_dict, unsupported_field_names, search_limit_reached
Пример #8
0
def SearchProjectCan(cnxn,
                     services,
                     project_ids,
                     query_ast,
                     shard_id,
                     harmonized_config,
                     left_joins=None,
                     where=None,
                     sort_directives=None,
                     query_desc=''):
    """Return a list of issue global IDs in the projects that satisfy the query.

  Args:
    cnxn: Regular database connection to the master DB.
    services: interface to issue storage backends.
    project_ids: list of int IDs of the project to search
    query_ast: A QueryAST PB with conjunctions and conditions.
    shard_id: limit search to the specified shard ID int.
    harmonized_config: harmonized config for all projects being searched.
    left_joins: SQL LEFT JOIN clauses that are needed in addition to
        anything generated from the query_ast.
    where: SQL WHERE clauses that are needed in addition to
        anything generated from the query_ast.
    sort_directives: list of strings specifying the columns to sort on.
    query_desc: descriptive string for debugging.

  Returns:
    (issue_ids, capped, error) where issue_ids is a list of issue issue_ids
    that satisfy the query, capped is True if the number of results were
    capped due to an implementation limit, and error is any well-known error
    (probably a query parsing error) encountered during search.
  """
    logging.info('searching projects %r for AST %r', project_ids, query_ast)
    start_time = time.time()
    left_joins = left_joins or []
    where = where or []
    if project_ids:
        cond_str = 'Issue.project_id IN (%s)' % sql.PlaceHolders(project_ids)
        where.append((cond_str, project_ids))

    # Translate the query AST into SQL joins and WHERE conditions; parse
    # failures are returned as the error element rather than raised.
    try:
        query_ast = ast2ast.PreprocessAST(cnxn, query_ast, project_ids,
                                          services, harmonized_config)
        logging.info('simplified AST is %r', query_ast)
        query_left_joins, query_where, _ = ast2select.BuildSQLQuery(query_ast)
        left_joins.extend(query_left_joins)
        where.extend(query_where)
    except ast2ast.MalformedQuery as e:
        # TODO(jrobbins): inform the user that their query had invalid tokens.
        # NOTE(review): e.message is a Python 2 idiom; under Python 3 this
        # would raise AttributeError — confirm the target runtime.
        logging.info('Invalid query tokens %s.\n %r\n\n', e.message, query_ast)
        return [], False, e
    except ast2select.NoPossibleResults as e:
        # TODO(jrobbins): inform the user that their query was impossible.
        logging.info('Impossible query %s.\n %r\n\n', e.message, query_ast)
        return [], False, e
    logging.info('translated to left_joins %r', left_joins)
    logging.info('translated to where %r', where)

    # Narrow the DB query with full-text search results first, when any
    # full-text terms are present.
    fts_capped = False
    if query_ast.conjunctions:
        # TODO(jrobbins): Handle "OR" in queries.  For now, we just process the
        # first conjunction.
        assert len(query_ast.conjunctions) == 1
        conj = query_ast.conjunctions[0]
        full_text_iids, fts_capped = tracker_fulltext.SearchIssueFullText(
            project_ids, conj, shard_id)
        if full_text_iids is not None:
            if not full_text_iids:
                return [], False, None  # No match on fulltext, so don't bother DB.
            cond_str = 'Issue.id IN (%s)' % sql.PlaceHolders(full_text_iids)
            where.append((cond_str, full_text_iids))

    # Gather label/status definitions only when sorting needs them.
    label_def_rows = []
    status_def_rows = []
    if sort_directives:
        if project_ids:
            for pid in project_ids:
                label_def_rows.extend(
                    services.config.GetLabelDefRows(cnxn, pid))
                status_def_rows.extend(
                    services.config.GetStatusDefRows(cnxn, pid))
        else:
            label_def_rows = services.config.GetLabelDefRowsAnyProject(cnxn)
            status_def_rows = services.config.GetStatusDefRowsAnyProject(cnxn)

    harmonized_labels = tracker_bizobj.HarmonizeLabelOrStatusRows(
        label_def_rows)
    harmonized_statuses = tracker_bizobj.HarmonizeLabelOrStatusRows(
        status_def_rows)
    harmonized_fields = harmonized_config.field_defs
    sort_left_joins, order_by = ast2sort.BuildSortClauses(
        sort_directives, harmonized_labels, harmonized_statuses,
        harmonized_fields)
    logging.info('translated to sort left_joins %r', sort_left_joins)
    logging.info('translated to order_by %r', order_by)

    issue_ids, db_capped = services.issue.RunIssueQuery(cnxn,
                                                        left_joins +
                                                        sort_left_joins,
                                                        where,
                                                        order_by,
                                                        shard_id=shard_id)
    # NOTE(review): logging.warn is a deprecated alias for logging.warning.
    logging.warn('executed "%s" query %r for %d issues in %dms', query_desc,
                 query_ast, len(issue_ids),
                 int((time.time() - start_time) * 1000))
    # Results are capped if either the full-text stage or the DB stage hit
    # its limit.
    capped = fts_capped or db_capped
    return issue_ids, capped, None