# Example 1
def _CheckForNewAnalysis(request, rerun=False):
    """Checks if a new analysis is needed for the requested flake.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    rerun (bool): Indicates a forced rerun by admin.

  Returns:
    (version_number, build_step)
    version_number (int): The version of the FlakeAnalysisRequest if a new
        analysis is needed; otherwise 0.
    build_step (BuildStep): a BuildStep instance if a new analysis is needed;
        otherwise None.
  """
    existing_request = FlakeAnalysisRequest.GetVersion(key=request.name)
    if not existing_request or (existing_request.bug_id and request.bug_id
                                and existing_request.bug_id != request.bug_id):
        # If no existing analysis or last analysis was for a different bug, randomly
        # pick one configuration for a new analysis.
        if existing_request:
            # Make a copy to preserve the version number of existing analysis and
            # prevent concurrent analyses of the same flake.
            # Obscure the previously stored emails first, then append the new
            # (deduplicated) ones, so old addresses stay anonymized.
            user_emails = (email_util.ObscureEmails(
                existing_request.user_emails, ['google.com']) +
                           list(set(request.user_emails)))
            existing_request.CopyFrom(request)
            request = existing_request
            request.user_emails = user_emails
        # The email list was just edited, so restart the obscuring clock for the
        # background anonymization cron.
        request.user_emails_obscured = False
        request.user_emails_last_edit = time_util.GetUTCNow()

        swarmed, supported, supported_build_step = _CheckFlakeSwarmedAndSupported(
            request)
        request.swarmed = swarmed
        request.supported = supported

        if supported_build_step and not request.is_step:
            supported_build_step.scheduled = True  # This step will be analyzed.

        # For unsupported or step-level flakes, still save them for monitoring.
        # retry_on_conflict=False means a concurrent save wins and this request
        # is treated as "another analysis was just triggered" below.
        _, saved = request.Save(
            retry_on_conflict=False)  # Create a new version.

        if not saved or not supported_build_step or request.is_step:
            # No new analysis if:
            # 1. Another analysis was just triggered.
            # 2. No representative step is Swarmed Gtest.
            # 3. The flake is a step-level one.
            return 0, None

        return request.version_number, supported_build_step
    else:
        # If no bug is attached to the existing analysis or the new request, or both
        # are attached to the same bug, start a new analysis with a different
        # configuration. For a configuration that was analyzed 7 days ago, reset it
        # to use the new reported step of the same configuration.
        # TODO: move this setting to config.
        return _MergeNewRequestIntoExistingOne(request, existing_request,
                                               rerun)
# Example 2
 def testObscureEmails(self):
     """Verifies ObscureEmails masks plain ids and email addresses."""
     input_emails = [
         'id', '*****@*****.**',
         '*****@*****.**'
     ]
     # NOTE(review): a single domain string (not a list) is passed here;
     # presumably ObscureEmails accepts both — confirm against its signature.
     obscured = email_util.ObscureEmails(input_emails, 'google.com')
     self.assertEqual(
         ['xx', '*****@*****.**', '*****@*****.**'],
         obscured)
# Example 3
def _ObscureMasterFlakeAnalysis():
  """Obscures the user email in MasterFlakeAnalysis.

  Pages through all MasterFlakeAnalysis entities whose triggering email has
  not yet been obscured and whose request time is past the retention window,
  anonymizes the email, and returns the number of entities updated.
  """
  total_updated = 0
  cutoff = _TimeBeforeNow(days=_REQUEST_RECORD_RENTENSION_DAYS)
  query = MasterFlakeAnalysis.query(
      MasterFlakeAnalysis.triggering_user_email_obscured == False,
      MasterFlakeAnalysis.request_time < cutoff)
  cursor = None
  has_more = True
  while has_more:
    page, cursor, has_more = query.fetch_page(_PAGE_SIZE, start_cursor=cursor)
    for analysis in page:
      obscured = email_util.ObscureEmails([analysis.triggering_user_email],
                                          ['google.com'])
      analysis.triggering_user_email = obscured[0]
      analysis.triggering_user_email_obscured = True
    # Persist the whole page in one batched write.
    ndb.put_multi(page)
    total_updated += len(page)
  return total_updated
# Example 4
def _ObscureFlakeAnalysisRequest():
  """Obscures the user emails in FlakeAnalysisRequest.

  Pages through all FlakeAnalysisRequest entities whose emails have not yet
  been obscured and whose last email edit is past the retention window,
  anonymizes every stored email, and returns the number of entities updated.
  """
  total_updated = 0
  cutoff = _TimeBeforeNow(days=_REQUEST_RECORD_RENTENSION_DAYS)
  query = FlakeAnalysisRequest.query(
      FlakeAnalysisRequest.user_emails_obscured == False,
      FlakeAnalysisRequest.user_emails_last_edit < cutoff)
  cursor = None
  has_more = True
  while has_more:
    page, cursor, has_more = query.fetch_page(_PAGE_SIZE, start_cursor=cursor)
    for flake_request in page:
      flake_request.user_emails = email_util.ObscureEmails(
          flake_request.user_emails, ['google.com'])
      flake_request.user_emails_obscured = True
    # Persist the whole page in one batched write.
    ndb.put_multi(page)
    total_updated += len(page)
  return total_updated
# Example 5
def _ObscureTriageRecordsInWfAnalysis():
  """Obscures the user names in WfAnalysis triage history.

  Pages through all WfAnalysis entities whose triage emails have not yet been
  obscured and whose last triage record is past the retention window,
  anonymizes the user name in each triage record, and returns the number of
  entities updated.
  """
  total_updated = 0
  cutoff = _TimeBeforeNow(days=_TRIAGE_RECORD_RENTENSION_DAYS)
  query = WfAnalysis.query(WfAnalysis.triage_email_obscured == False,
                           WfAnalysis.triage_record_last_add < cutoff)
  cursor = None
  has_more = True
  while has_more:
    page, cursor, has_more = query.fetch_page(_PAGE_SIZE, start_cursor=cursor)
    for analysis in page:
      # triage_history may be None for analyses that were never triaged.
      for record in (analysis.triage_history or []):
        record['user_name'] = email_util.ObscureEmails([record['user_name']],
                                                       ['google.com'])[0]
      analysis.triage_email_obscured = True
    # Persist the whole page in one batched write.
    ndb.put_multi(page)
    total_updated += len(page)
  return total_updated
# Example 6
def _MergeNewRequestIntoExistingOne(new_request,
                                    existing_request,
                                    rerun=False):
    """Merges the new request into the existing request and creates a new record.

  Args:
    new_request (FlakeAnalysisRequest): The request to analyze a flake.
    existing_request (FlakeAnalysisRequest): The existing request in record.
    rerun (bool): The admin has forced a rerun.

  Returns:
    (version_number, build_step)
    version_number (int): The version of the FlakeAnalysisRequest if a new
        analysis is needed; otherwise 0.
    build_step (BuildStep): a BuildStep instance if a new analysis is needed;
        otherwise None.
  """
    # If no bug is attached to the existing analysis or the new request, or both
    # are attached to the same bug, start a new analysis with a different
    # configuration. For a configuration that was analyzed 7 days ago, reset it
    # to use the new reported step of the same configuration.
    # TODO: Move this setting to config.
    # TODO: Refactor this method, and put it in FlakeAnalysisRequest.
    seconds_n_days = 7 * 24 * 60 * 60  # 7 days.
    candidate_supported_steps = []
    # A forced rerun always re-saves the merged request below, even if no
    # build step changes.
    need_updating = rerun
    for step in new_request.build_steps:
        # Look up the recorded step with the same master/builder configuration,
        # if any.
        existing_step = None
        for s in existing_request.build_steps:
            if (step.master_name == s.master_name
                    and step.builder_name == s.builder_name):
                existing_step = s
                break

        if rerun and existing_step:
            # Forced rerun: re-analyze the already-recorded configuration.
            candidate_supported_steps.append(existing_step)

        if existing_step and not rerun:
            # If last reported flake at the existing step was too long ago, drop it
            # so that the new one is recorded.
            time_diff = step.reported_time - existing_step.reported_time
            if time_diff.total_seconds() > seconds_n_days:
                existing_request.build_steps.remove(existing_step)
                existing_step = None

        if not existing_step and not rerun:
            # The configuration is new (or was just dropped as stale): record it
            # and make it a candidate for analysis if supported.
            need_updating = True
            existing_request.build_steps.append(step)
            if step.supported:
                candidate_supported_steps.append(step)

    if not candidate_supported_steps:
        # Find some existing configuration that is not analyzed yet.
        for s in existing_request.build_steps:
            if not s.scheduled and s.supported:
                candidate_supported_steps.append(s)

    supported_build_step = None
    if candidate_supported_steps:
        # Only the first candidate gets analyzed; the rest remain unscheduled
        # and may be picked up by future requests.
        supported_build_step = candidate_supported_steps[0]
        existing_request.swarmed = (existing_request.swarmed
                                    or supported_build_step.swarmed)
        existing_request.supported = True
        need_updating = True

    if supported_build_step and not existing_request.is_step:
        supported_build_step.scheduled = True  # This will be analyzed.

    if not existing_request.bug_id:  # No bug was attached before.
        existing_request.bug_id = new_request.bug_id
        need_updating = True

    if need_updating:
        # Obscure the previously stored emails before appending the new
        # (deduplicated) ones, then restart the obscuring clock.
        existing_request.user_emails = (email_util.ObscureEmails(
            existing_request.user_emails, ['google.com']) +
                                        list(set(new_request.user_emails)))
        existing_request.user_emails_obscured = False
        existing_request.user_emails_last_edit = time_util.GetUTCNow()

        # This will create a new data entity.
        existing_request.put()

    if not supported_build_step or existing_request.is_step:
        # No new analysis if:
        # 1. All analyzed steps are fresh enough and cover all the steps in the
        #    request.
        # 2. No representative step is Swarmed Gtest.
        # 3. The flake is a step-level one.
        return 0, None

    return existing_request.version_number, supported_build_step