Example #1
    def testSavedQueryIDToCond(self):
        self.mox.StubOutWithMock(savedqueries_helpers, 'SavedQueryToCond')
        savedqueries_helpers.SavedQueryToCond(mox.IgnoreArg()).AndReturn('ret')
        self.mox.ReplayAll()
        query_cond = savedqueries_helpers.SavedQueryIDToCond(
            self.cnxn, self.features, 1)
        self.assertEqual('ret', query_cond)
        self.mox.VerifyAll()

        self.mox.StubOutWithMock(tracker_bizobj, 'GetBuiltInQuery')
        tracker_bizobj.GetBuiltInQuery(1).AndReturn('built_in_query')
        self.mox.ReplayAll()
        query_cond = savedqueries_helpers.SavedQueryIDToCond(
            self.cnxn, self.features, 1)
        self.assertEqual('built_in_query', query_cond)
        self.mox.VerifyAll()
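
The same two cases can be expressed with the standard library's unittest.mock instead of mox. The sketch below is illustrative only and not part of the original suite: the import paths, the use of plain Mock objects for cnxn and features, and the class name are assumptions.

import unittest
from unittest import mock

from features import savedqueries_helpers  # import paths assumed from the project layout
from tracker import tracker_bizobj


class SavedQueryIDToCondMockTest(unittest.TestCase):

    def setUp(self):
        # Stand-ins for the connection and features service the original
        # test builds in its setUp(); SavedQueryIDToCond receives them as-is.
        self.cnxn = mock.Mock()
        self.features = mock.Mock()

    def testSavedQueryResult(self):
        # Mirrors the first half of the mox test: a stubbed SavedQueryToCond
        # result is returned unchanged.
        with mock.patch.object(
                savedqueries_helpers, 'SavedQueryToCond', return_value='ret'):
            query_cond = savedqueries_helpers.SavedQueryIDToCond(
                self.cnxn, self.features, 1)
        self.assertEqual('ret', query_cond)

    def testBuiltInQueryResult(self):
        # Mirrors the second half: a stubbed built-in query is returned.
        with mock.patch.object(
                tracker_bizobj, 'GetBuiltInQuery',
                return_value='built_in_query'):
            query_cond = savedqueries_helpers.SavedQueryIDToCond(
                self.cnxn, self.features, 1)
        self.assertEqual('built_in_query', query_cond)
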
Example #2
    def _MakePromises(self):
        config_dict = self.services.config.GetProjectConfigs(
            self.mr.cnxn, self.query_project_ids)
        self.harmonized_config = tracker_bizobj.HarmonizeConfigs(
            list(config_dict.values()))

        self.canned_query = savedqueries_helpers.SavedQueryIDToCond(
            self.mr.cnxn, self.services.features, self.mr.can)

        self.canned_query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
            self.me_user_ids, self.canned_query)
        self.mr.warnings.extend(warnings)
        self.user_query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
            self.me_user_ids, self.mr.query)
        self.mr.warnings.extend(warnings)
        logging.debug('Searching query: %s %s', self.canned_query,
                      self.user_query)

        slice_term = ('Issue.shard = %s', [self.mr.shard_id])

        sd = sorting.ComputeSortDirectives(self.harmonized_config,
                                           self.mr.group_by_spec,
                                           self.mr.sort_spec)

        self.result_iids_promise = framework_helpers.Promise(
            _GetQueryResultIIDs, self.mr.cnxn, self.services,
            self.canned_query, self.user_query, self.query_project_ids,
            self.harmonized_config, sd, slice_term, self.mr.shard_id,
            self.mr.invalidation_timestep)
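
framework_helpers.Promise lets _MakePromises schedule the expensive backend query without computing it on the spot; the value is pulled out later, once the other promises have been set up. As a rough illustration of that deferred-evaluation pattern only (the class and accessor names below are hypothetical, not the real framework_helpers API):

# Sketch of a minimal lazy promise, assuming the framework's Promise simply
# defers its callback until the value is requested. Not the real implementation.
class LazyPromise(object):

    def __init__(self, callback, *args):
        self._callback = callback
        self._args = args
        self._evaluated = False
        self._value = None

    def get(self):
        # Run the callback on first access, then cache the result.
        if not self._evaluated:
            self._value = self._callback(*self._args)
            self._evaluated = True
        return self._value

# Usage, mirroring the call above (arguments abbreviated):
#   promise = LazyPromise(_GetQueryResultIIDs, self.mr.cnxn, self.services, ...)
#   ...
#   result = promise.get()  # whatever _GetQueryResultIIDs returns
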
Example #3
    def IssueSnapshot(self, mc, request):
        """Fetch IssueSnapshot counts for charting."""
        warnings = []

        if not request.timestamp:
            raise exceptions.InputException('Param `timestamp` required.')

        if not request.project_name:
            raise exceptions.InputException('Param `project_name` required.')

        if request.group_by == 'label' and not request.label_prefix:
            raise exceptions.InputException('Param `label_prefix` required.')

        if request.canned_query:
            canned_query = savedqueries_helpers.SavedQueryIDToCond(
                mc.cnxn, self.services.features, request.canned_query)
            # TODO(jrobbins): support linked accounts me_user_ids.
            canned_query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
                [mc.auth.user_id], canned_query)
        else:
            canned_query = None

        if request.query:
            query, warnings = searchpipeline.ReplaceKeywordsWithUserIDs(
                [mc.auth.user_id], request.query)
        else:
            query = None

        with work_env.WorkEnv(mc, self.services) as we:
            project = we.GetProjectByName(request.project_name)
            results, unsupported_fields, limit_reached = we.SnapshotCountsQuery(
                project,
                request.timestamp,
                request.group_by,
                label_prefix=request.label_prefix,
                query=query,
                canned_query=canned_query)
        if request.group_by == 'owner':
            # Map user ids to emails.
            snapshot_counts = [
                issues_pb2.IssueSnapshotCount(
                    dimension=self.services.user.GetUser(mc.cnxn, key).email,
                    count=result) for key, result in results.items()
            ]
        else:
            snapshot_counts = [
                issues_pb2.IssueSnapshotCount(dimension=key, count=result)
                for key, result in results.items()
            ]
        response = issues_pb2.IssueSnapshotResponse()
        response.snapshot_count.extend(snapshot_counts)
        response.unsupported_field.extend(unsupported_fields)
        response.unsupported_field.extend(warnings)
        response.search_limit_reached = limit_reached
        return response
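
Both this handler and _MakePromises in the previous example run queries through searchpipeline.ReplaceKeywordsWithUserIDs before searching. The real helper works on structured query terms and reports warnings for cases it cannot handle; the regex-based stand-in below is only a simplified, hypothetical illustration of rewriting "me" terms to concrete user IDs.

import re

# Simplified, illustrative stand-in for searchpipeline.ReplaceKeywordsWithUserIDs;
# not the real implementation.
def replace_me_keywords(me_user_ids, query):
    warnings = []
    if not query:
        return query, warnings
    if me_user_ids:
        id_str = ','.join(str(uid) for uid in me_user_ids)
        new_query = re.sub(r'\bme\b', id_str, query)
    else:
        # With no signed-in user there is nothing to substitute; report it.
        warnings.append('The "me" keyword was ignored because no user is signed in.')
        new_query = re.sub(r'\bme\b', '', query)
    return new_query, warnings

# Example: replace_me_keywords([111], 'owner:me cc:me')
#   -> ('owner:111 cc:111', [])
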
Example #4
def ParseQuery(mr, config, services):
  """Parse the user's query.

  Args:
    mr: commonly used info parsed from the request.
    config: The ProjectConfig PB for the project.
    services: connections to backends.

  Returns:
    A pair (ast, is_fulltext) with the parsed query abstract syntax tree
    and a boolean that is True if the query included any fulltext terms.
  """
  canned_query = savedqueries_helpers.SavedQueryIDToCond(
    mr.cnxn, services.features, mr.can)
  query_ast = query2ast.ParseUserQuery(
    mr.query, canned_query, query2ast.BUILTIN_ISSUE_FIELDS, config)

  is_fulltext_query = bool(
    query_ast.conjunctions and
    fulltext_helpers.BuildFTSQuery(
      query_ast.conjunctions[0], tracker_fulltext.ISSUE_FULLTEXT_FIELDS))

  return query_ast, is_fulltext_query
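
Per the docstring, callers unpack the returned pair and can use the boolean to decide whether a fulltext backend lookup is needed. A usage sketch, assuming mr, config, and services are supplied by the surrounding request-handling framework (they are not constructed here):

# Usage sketch only; mr, config, and services come from the framework.
query_ast, is_fulltext_query = ParseQuery(mr, config, services)
if is_fulltext_query:
  # As in ParseQuery itself, the fulltext terms live in the first conjunction.
  logging.info('fulltext terms present: %r', query_ast.conjunctions[0])
else:
  logging.info('structured-only query: %r', query_ast)
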
Example #5
def _GetCachedSearchResults(mr, query_project_ids, needed_shard_keys,
                            harmonized_config, project_shard_timestamps,
                            services):
    """Return a dict of cached search results that are not already stale.

  If it were not for cross-project search, we would simply cache when we do a
  search and then invalidate when an issue is modified.  But, with
  cross-project search we don't know all the memcache entries that would
  need to be invalidated.  So, instead, we write the search result cache
  entries and then an initial modified_ts value for each project if it was
  not already there. And, when we update an issue we write a new
  modified_ts entry, which implicitly invalidate all search result
  cache entries that were written earlier because they are now stale.  When
  reading from the cache, we ignore any query project with modified_ts
  after its search result cache timestamp, because it is stale.

  Args:
    mr: common information parsed from the request.
    query_project_ids: list of project ID numbers for all projects being
        searched.
    needed_shard_keys: set of shard keys that need to be checked.
    harmonized_config: ProjectIsueConfig with combined information for all
        projects involved in this search.
    project_shard_timestamps: a dict {(project_id, shard_id): timestamp, ...}
        that tells when each shard was last invalidated.
    services: connections to backends.

  Returns:
    Tuple consisting of:
      A dictionary {shard_id: [issue_id, ...], ...} of unfiltered search result
      issue IDs. Only shard_ids found in memcache will be in that dictionary.
      The result issue IDs must be permission checked before they can be
      considered to be part of the user's result set.
      A dictionary {shard_id: bool, ...}. The boolean is set to True if
      the search results limit of the shard is hit.
  """
    projects_str = ','.join(str(pid) for pid in sorted(query_project_ids))
    projects_str = projects_str or 'all'
    canned_query = savedqueries_helpers.SavedQueryIDToCond(
        mr.cnxn, services.features, mr.can)
    logging.info('canned query is %r', canned_query)
    canned_query = searchpipeline.ReplaceKeywordsWithUserID(
        mr.me_user_id, canned_query)

    sd = sorting.ComputeSortDirectives(mr, harmonized_config)
    sd_str = ' '.join(sd)
    memcache_key_prefix = '%s;%s' % (projects_str, canned_query)
    limit_reached_key_prefix = '%s;%s' % (projects_str, canned_query)

    cached_dict = memcache.get_multi([
        '%s;%s;%s;%d' % (memcache_key_prefix, subquery, sd_str, sid)
        for sid, subquery in needed_shard_keys
    ])
    cached_search_limit_reached_dict = memcache.get_multi([
        '%s;%s;%s;search_limit_reached;%d' %
        (limit_reached_key_prefix, subquery, sd_str, sid)
        for sid, subquery in needed_shard_keys
    ])

    unfiltered_dict = {}
    search_limit_reached_dict = {}
    for shard_key in needed_shard_keys:
        shard_id, subquery = shard_key
        memcache_key = '%s;%s;%s;%d' % (memcache_key_prefix, subquery, sd_str,
                                        shard_id)
        limit_reached_key = '%s;%s;%s;search_limit_reached;%d' % (
            limit_reached_key_prefix, subquery, sd_str, shard_id)
        if memcache_key not in cached_dict:
            logging.info('memcache miss on shard %r', shard_key)
            continue

        cached_iids, cached_ts = cached_dict[memcache_key]
        if cached_search_limit_reached_dict.get(limit_reached_key):
            search_limit_reached, _ = cached_search_limit_reached_dict[
                limit_reached_key]
        else:
            search_limit_reached = False

        stale = False
        if query_project_ids:
            for project_id in query_project_ids:
                modified_ts = project_shard_timestamps.get(
                    (project_id, shard_id))
                if modified_ts is None or modified_ts > cached_ts:
                    stale = True
                    logging.info(
                        'memcache too stale on shard %r because of %r',
                        shard_id, project_id)
                    break
        else:
            modified_ts = project_shard_timestamps.get(('all', shard_id))
            if modified_ts is None or modified_ts > cached_ts:
                stale = True
                logging.info('memcache too stale on shard %r because of all',
                             shard_id)

        if not stale:
            logging.info('memcache hit on %r', shard_key)
            unfiltered_dict[shard_key] = cached_iids
            search_limit_reached_dict[shard_key] = search_limit_reached

    return unfiltered_dict, search_limit_reached_dict
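
The invalidation scheme described in the docstring reduces to one comparison per (project, shard) pair: a cached entry is usable only if every relevant project's modified_ts is known and is not newer than the timestamp stored with the cache entry. A minimal sketch of that check in isolation (the helper name is hypothetical; the loop above inlines the same logic):

# Hypothetical helper isolating the staleness rule from the loop above.
def _cache_entry_is_stale(cached_ts, shard_id, query_project_ids,
                          project_shard_timestamps):
    """Return True if any relevant project invalidated this shard after cached_ts."""
    if query_project_ids:
        keys = [(project_id, shard_id) for project_id in query_project_ids]
    else:
        keys = [('all', shard_id)]
    for key in keys:
        modified_ts = project_shard_timestamps.get(key)
        if modified_ts is None or modified_ts > cached_ts:
            # Unknown or newer invalidation timestamp: treat the entry as stale.
            return True
    return False

# Example: an entry cached at t=100 is stale if project 7 invalidated the
# same shard at t=120, but fresh if the last invalidation was at t=90:
#   _cache_entry_is_stale(100, 3, [7], {(7, 3): 120})  -> True
#   _cache_entry_is_stale(100, 3, [7], {(7, 3): 90})   -> False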