Example #1
    def run(self, raw_crash_data):
        """Fire off pipelines to run the analysis and publish its results.

        N.B., due to the structure of AppEngine pipelines, this method must
        accept the same arguments as are passed to ``__init__``; however,
        because they were already passed to ``__init__`` there's no use in
        receiving them here. Thus, we discard all the arguments to this method
        (except for ``self``, naturally).
        """
        predator_client = PredatorForClientID(
            self._client_id,
            CachedGitilesRepository.Factory(HttpClientAppengine()),
            CrashConfig.Get())
        crash_data = predator_client.GetCrashData(raw_crash_data)

        need_analysis = predator_client.NeedsNewAnalysis(crash_data)
        if need_analysis:
            logging.info('New %s analysis is scheduled for %s',
                         self._client_id, crash_data.identifiers)

            UpdateCrashAnalysisData(crash_data, predator_client)
            run_analysis = yield CrashAnalysisPipeline(self._client_id,
                                                       crash_data.identifiers)
            with pipeline.After(run_analysis):
                yield PublishResultPipeline(self._client_id,
                                            crash_data.identifiers)
        else:
            yield PublishResultPipeline(self._client_id,
                                        crash_data.identifiers)
Example #2
def GetTryJobs(build_ids):
  """Returns the try-job builds for the given build ids.

  Args:
    build_ids (list): a list of build ids returned by Buildbucket.

  Returns:
    A list of (error, build) tuples in the same order as the given build ids.
      error: an instance of BuildbucketError. None if no error occurred.
      build: an instance of BuildbucketBuild. None if error occurred.
  """
  json_results = []
  headers = {
      'Authorization': 'Bearer ' + auth_util.GetAuthToken(),
      'Content-Type': 'application/json; charset=UTF-8'
  }

  for build_id in build_ids:
    status_code, content = HttpClientAppengine().Get(
        _BUILDBUCKET_PUT_GET_ENDPOINT + '/' + build_id, headers=headers)
    if status_code == 200:  # pragma: no cover
      json_results.append(json.loads(content))
    else:
      error_content = {
          'error': {
              'reason': status_code,
              'message': content
          }
      }
      json_results.append(error_content)

  return _ConvertFuturesToResults(json_results)
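Both GetTryJobs above and TriggerTryJobs in Example #7 below funnel per-build
JSON (or error dicts) through _ConvertFuturesToResults, which yields
(error, build) pairs. Below is a minimal sketch of consuming those pairs; the
namedtuples are hypothetical stand-ins for the real BuildbucketError and
BuildbucketBuild classes.

import collections

# Hypothetical stand-ins, not Findit's actual classes.
BuildbucketError = collections.namedtuple('BuildbucketError',
                                          ['reason', 'message'])
BuildbucketBuild = collections.namedtuple('BuildbucketBuild', ['id', 'status'])

results = [
    (None, BuildbucketBuild(id='8912345', status='COMPLETED')),
    (BuildbucketError(reason=403, message='access denied'), None),
]

for error, build in results:
  if error is not None:
    print('Buildbucket error %s: %s' % (error.reason, error.message))
  else:
    print('Build %s is %s' % (build.id, build.status))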
Example #3
  def __init__(self, client_id, crash_identifiers):
    super(CrashBasePipeline, self).__init__(client_id, crash_identifiers)
    self._crash_identifiers = crash_identifiers
    self._findit = FinditForClientID(
        client_id,
        CachedGitilesRepository.Factory(HttpClientAppengine()),
        CrashConfig.Get())
Example #4
  def __init__(self, client_id, crash_identifiers):
    super(CrashBasePipeline, self).__init__(client_id, crash_identifiers)
    self._crash_identifiers = crash_identifiers
    self._predator = PredatorForClientID(
        client_id, CachedGitilesRepository.Factory(HttpClientAppengine()),
        CrashConfig.Get())
    self._predator.SetLog(self.log)
Example #5
def _GetSwarmingTaskIdForTryJob(report, revision, step_name, test_name):
  """Check json output for each task and return id of the one with test result.
  """
  if not report:
    return None

  http_client = HttpClientAppengine()

  step_result = report.get('result', {}).get(revision, {}).get(
      step_name, {})
  pass_fail_counts = step_result.get('pass_fail_counts', {}).get(test_name)
  task_ids = step_result.get('step_metadata', {}).get('swarm_task_ids', [])

  if len(task_ids) == 1:
    return task_ids[0]

  if not pass_fail_counts:  # Test doesn't exist.
    return task_ids[0] if task_ids else None

  for task_id in task_ids:
    output_json = swarming_util.GetIsolatedOutputForTask(task_id, http_client)
    if output_json:
      for data in output_json.get('per_iteration_data', []):
        # If this task doesn't have result, per_iteration_data will look like
        # [{}, {}, ...]
        if data:
          return task_id

  return None
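The chain of .get() calls above implies a nested shape for the try-job report.
Below is a self-contained sketch of that assumed shape with made-up data; real
reports may carry more fields.

# Hypothetical report data inferred from the .get() chains above.
report = {
    'result': {
        'rev123': {
            'browser_tests': {
                'pass_fail_counts': {'Suite.Test': {'pass_count': 0,
                                                    'fail_count': 5}},
                'step_metadata': {'swarm_task_ids': ['task1', 'task2']},
            },
        },
    },
}

step_result = report.get('result', {}).get('rev123', {}).get(
    'browser_tests', {})
assert step_result.get('pass_fail_counts', {}).get('Suite.Test')
assert step_result.get('step_metadata', {}).get(
    'swarm_task_ids') == ['task1', 'task2']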
Example #6
    def _CheckFirstKnownFailureForSwarmingTests(self, master_name,
                                                builder_name, build_number,
                                                failed_steps, builds):
        """Uses swarming test results to update first failure info at test level."""
        http_client = HttpClientAppengine()

        # Identifies swarming tests and saves isolated data to them.
        result = swarming_util.GetIsolatedDataForFailedBuild(
            master_name, builder_name, build_number, failed_steps, http_client)
        if not result:
            return

        for step_name, failed_step in failed_steps.iteritems():
            if not failed_step.get('list_isolated_data'):  # Non-swarming step.
                continue  # pragma: no cover.

            # Checks tests in one step and updates failed_step info if swarming.
            result = self._StartTestLevelCheckForFirstFailure(
                master_name, builder_name, build_number, step_name,
                failed_step, http_client)

            if result:  # pragma: no cover
                # Iterates backwards to get a more precise failed_steps info.
                self._UpdateFirstFailureOnTestLevel(master_name, builder_name,
                                                    build_number, step_name,
                                                    failed_step, http_client)

        self._UpdateFailureInfoBuilds(failed_steps, builds)
Example #7
def TriggerTryJobs(try_jobs):
  """Triggers try-job in a batch.

  Args:
    try_jobs (list): a list of TryJob instances.

  Returns:
    A list of (error, build) tuples in the same order as the given try-jobs.
      error: an instance of BuildbucketError. None if no error occurred.
      build: an instance of BuildbucketBuild. None if error occurred.
  """
  json_results = []
  headers = {
      'Authorization': 'Bearer ' + auth_util.GetAuthToken(),
      'Content-Type': 'application/json; charset=UTF-8'
  }

  for try_job in try_jobs:
    status_code, content = HttpClientAppengine().Put(
        _BUILDBUCKET_PUT_GET_ENDPOINT,
        json.dumps(try_job.ToBuildbucketRequest()), headers=headers)
    if status_code == 200:  # pragma: no cover
      json_results.append(json.loads(content))
    else:
      error_content = {
          'error': {
              'reason': status_code,
              'message': content
          }
      }
      json_results.append(error_content)

  return _ConvertFuturesToResults(json_results)
Example #8
def GetPossibleRevertInfoFromRevision(revision):
  """Parse message to get information of reverting and reverted cls."""
  git_repo = CachedGitilesRepository(
      HttpClientAppengine(),
      'https://chromium.googlesource.com/chromium/src.git')
  change_log = git_repo.GetChangeLog(revision)
  if not change_log:  # pragma: no cover
    return {}

  reverted_revision = change_log.reverted_revision
  if not reverted_revision:
    return {}

  reverted_cl_change_log = git_repo.GetChangeLog(reverted_revision)

  data = {
      'action': 'Reverted',
      'fixed_revision': reverted_revision,
      'fixed_cl_review_url': (reverted_cl_change_log.code_review_url
          if reverted_cl_change_log else None),
      'fixed_cl_commit_position': (reverted_cl_change_log.commit_position
          if reverted_cl_change_log else None),
      'fixing_revision': revision,
      'fixing_cl_review_url': change_log.code_review_url,
      'fixing_cl_commit_position': change_log.commit_position
  }
  return data
Example #9
    def run(self, client_id, crash_keys, publish_to_client=False):
        """Reruns analysis for a batch of crashes.

        Args:
          client_id (CrashClient): The client whose crashes should be iterated.
          crash_keys (list): A list of urlsafe encodings of crash keys.
          publish_to_client (bool): Whether to also publish results to the
              client after each analysis.
        """
        client = PredatorForClientID(
            client_id, CachedGitilesRepository.Factory(HttpClientAppengine()),
            CrashConfig.Get())

        updated = []
        for key in crash_keys:
            key = ndb.Key(urlsafe=key)
            crash = key.get()
            crash.ReInitialize(client)
            updated.append(crash)

        ndb.put_multi(updated)

        for crash in updated:
            logging.info('Initialize analysis for crash %s', crash.identifiers)
            if publish_to_client:
                run_analysis = yield CrashAnalysisPipeline(
                    client_id, crash.identifiers)
                with pipeline.After(run_analysis):
                    yield PublishResultPipeline(client_id, crash.identifiers)
            else:
                yield CrashAnalysisPipeline(client_id, crash.identifiers)
Example #10
def FindMatchingWaterfallStep(build_step, test_name):
    """Finds the matching Waterfall step and checks whether it is supported.

    Only Swarmed and gtest-based steps are supported at the moment.

    Args:
      build_step (BuildStep): A build step on Waterfall or Commit Queue. It
          will be updated with the matching Waterfall step and whether it is
          Swarmed and supported.
      test_name (str): The name of the test.
    """

    build_step.swarmed = False
    build_step.supported = False

    http_client = HttpClientAppengine()

    if build_step.on_cq:
        wf_master_name, wf_builder_name, wf_build_number, wf_step_name, metadata = (
            _GetMatchingWaterfallBuildStep(build_step, http_client))

        build_step.wf_master_name = wf_master_name
        build_step.wf_builder_name = wf_builder_name
        build_step.wf_build_number = wf_build_number
        build_step.wf_step_name = wf_step_name

        if not build_step.has_matching_waterfall_step:
            return
    else:
        build_step.wf_master_name = build_step.master_name
        build_step.wf_builder_name = build_step.builder_name
        build_step.wf_build_number = build_step.build_number
        build_step.wf_step_name = build_step.step_name
        metadata = buildbot.GetStepLog(build_step.master_name,
                                       build_step.builder_name,
                                       build_step.build_number,
                                       build_step.step_name, http_client,
                                       'step_metadata')
        if not metadata:
            logging.error('Couldn\'t get step_metadata')
            return

    # Query Swarming for isolated data.
    build_step.swarmed = True if metadata.get('swarm_task_ids') else False

    if build_step.swarmed:
        # Retrieve a sample output from Isolate.
        task_id = metadata['swarm_task_ids'][0]
        output = swarming_util.GetIsolatedOutputForTask(task_id, http_client)
        if output:
            # Guess from the format.
            build_step.supported = (
                isinstance(output, dict)
                and isinstance(output.get('all_tests'), list)
                and test_name in output.get('all_tests', [])
                and isinstance(output.get('per_iteration_data'), list) and all(
                    isinstance(i, dict)
                    for i in output.get('per_iteration_data')))
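The format guess at the end of FindMatchingWaterfallStep can be exercised on
its own. Below is a sketch with made-up isolated output that satisfies every
check; real swarming output carries more fields.

output = {
    'all_tests': ['Suite.Test1', 'Suite.Test2'],
    'per_iteration_data': [{'Suite.Test1': [{'status': 'SUCCESS'}]}],
}
test_name = 'Suite.Test1'
supported = (
    isinstance(output, dict)
    and isinstance(output.get('all_tests'), list)
    and test_name in output.get('all_tests', [])
    and isinstance(output.get('per_iteration_data'), list)
    and all(isinstance(i, dict) for i in output.get('per_iteration_data')))
assert supported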
Example #11
    def testDEPSDownloaderForChromeVersion(self):
        def _MockGet(*_):
            return 200, base64.b64encode('Dummy DEPS content'), {}

        self.mock(HttpClientAppengine, '_Get', _MockGet)
        deps_downloader = chrome_dependency_fetcher.DEPSDownloader(
            gitiles_repository.GitilesRepository.Factory(
                HttpClientAppengine()))
        content = deps_downloader.Load('http://chrome-internal', '50.0.1234.0',
                                       'DEPS')
        self.assertEqual(content, 'Dummy DEPS content')
Example #12
  def HandleGet(self):
    # Update component_classifier with latest component/team information.
    new_config_dict = {'component_classifier': GetComponentClassifierConfig(
        OWNERS_MAPPING_URL, PREDATOR_MAPPING_URL, HttpClientAppengine())}
    if not new_config_dict.get('component_classifier'):  # pragma: no cover.
      return BaseHandler.CreateError(
          'Component Classifier Config Update Fail', 400)

    crash_config = CrashConfig.Get()
    crash_config.Update(
        users.User('*****@*****.**'), True, **new_config_dict)
Example #13
def _GetGitBlame(repo_info, touched_file_path):
    """Gets git blames of touched_file.

    Args:
      repo_info (dict): The repo_url and revision for the build cycle.
      touched_file_path (str): Full path of a file in change_log.
    """
    if repo_info:
        repo_url = repo_info['repo_url']
        git_repo = CachedGitilesRepository(HttpClientAppengine(), repo_url)
        revision = repo_info['revision']
        return git_repo.GetBlame(touched_file_path, revision)
Example #14
  def testSendRequestToServerRetryTimeout(self, _):
    override_swarming_settings = {
        'should_retry_server': True,
        'server_retry_timeout_hours': -1
    }
    self.UpdateUnitTestConfigSettings('swarming_settings',
                                      override_swarming_settings)
    content, error = swarming_util._SendRequestToServer(
        'http://www.someurl.com', HttpClientAppengine())
    self.assertIsNone(content)
    self.assertEqual(error['code'],
                     swarming_util.URLFETCH_CONNECTION_CLOSED_ERROR)
    self.assertTrue(error['retry_timeout'])
Example #15
  def HandleGet(self):
    """Update the repo_to_dep_path in config from the lastest DEPS."""
    # Update repo_to_dep_path to the latest information.
    dep_fetcher = ChromeDependencyFetcher(
      CachedGitilesRepository.Factory(HttpClientAppengine()))

    repo_to_dep_path = GetRepoToDepPath(dep_fetcher)
    if not repo_to_dep_path:  # pragma: no cover.
      return self.CreateError('Fail to update repo_to_dep_path config.', 400)

    crash_config = CrashConfig.Get()
    crash_config.Update(users.User(app_identity.get_service_account_name()),
                        True, repo_to_dep_path=repo_to_dep_path)
Example #16
def _GetDependencies(chromium_revision, os_platform):
    """Returns the dependencies used by the specified chromium revision."""
    deps = {}
    dep_fetcher = chrome_dependency_fetcher.ChromeDependencyFetcher(
        CachedGitilesRepository.Factory(HttpClientAppengine()))
    for path, dependency in dep_fetcher.GetDependency(chromium_revision,
                                                      os_platform).iteritems():
        deps[path] = {
            'repo_url': dependency.repo_url,
            'revision': dependency.revision,
        }

    return deps
Example #17
    def testFindCulprit(self, mock_find_culprit):
        mock_find_culprit.return_value = None

        # TODO(wrengr): would be less fragile to call
        # PredatorForFracas.CreateAnalysis instead; though if I'm right about
        # the original purpose of this test, then this is one of the few
        # places where calling FracasCrashAnalysis directly would actually
        # make sense.
        analysis = FracasCrashAnalysis.Create({'signature': 'sig'})
        predator_client = _PredatorForChromeCrash(
            GitilesRepository.Factory(HttpClientAppengine()),
            CrashConfig.Get())
        self.assertIsNone(predator_client.FindCulprit(analysis))
Example #18
    def testFindCulprit(self):
        self.mock(FinditForChromeCrash, 'FindCulprit', lambda self, *_: None)

        # TODO(wrengr): would be less fragile to call
        # FinditForFracas.CreateAnalysis instead; though if I'm right about
        # the original purpose of this test, then this is one of the few
        # places where calling FracasCrashAnalysis directly would actually
        # make sense.
        analysis = FracasCrashAnalysis.Create({'signature': 'sig'})
        findit_client = _FinditForChromeCrash(
            GitilesRepository.Factory(HttpClientAppengine()),
            CrashConfig.Get())
        self.assertIsNone(findit_client.FindCulprit(analysis))
Example #19
def _GetChangedLinesForDependencyRepo(roll, file_path_in_log, line_numbers):
    """Gets changed line numbers for file in failure log.

    Tests whether the lines mentioned in the failure log were changed within
    the DEPS roll; if so, returns those line numbers.
    """
    roll_repo = CachedGitilesRepository(HttpClientAppengine(),
                                        roll['repo_url'])
    old_revision = roll['old_revision']
    new_revision = roll['new_revision']
    old_change_log = roll_repo.GetChangeLog(old_revision)
    old_rev_author_time = old_change_log.author.time
    new_change_log = roll_repo.GetChangeLog(new_revision)
    new_rev_author_time = new_change_log.author.time

    file_change_type = None
    changed_line_numbers = []

    if old_rev_author_time >= new_rev_author_time:
        # If the DEPS roll is a downgrade, bail out.
        return file_change_type, changed_line_numbers

    commits_in_roll = roll_repo.GetCommitsBetweenRevisions(
        old_revision, new_revision)

    file_change_type, culprit_commit = _GetChangeTypeAndCulpritCommit(
        file_path_in_log, roll_repo, commits_in_roll)

    if culprit_commit is None:
        # Bail out if no commits touched the file in the log.
        return file_change_type, changed_line_numbers

    if file_change_type == ChangeType.MODIFY:
        # If the file was modified, use the blame information to determine which
        # lines were changed.
        blame = roll_repo.GetBlame(file_path_in_log, culprit_commit)

        if not blame:
            return file_change_type, changed_line_numbers

        for region in blame:
            if line_numbers:
                for line_number in line_numbers:
                    if (line_number >= region.start
                            and line_number <= region.start + region.count - 1
                            and region.revision in commits_in_roll):
                        # This line from the failure log was changed within
                        # the DEPS roll.
                        changed_line_numbers.append(line_number)

    return file_change_type, changed_line_numbers
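The region check above is plain interval arithmetic: a line is kept only if it
falls inside a blame region whose revision landed within the roll. A worked
sketch, with a hypothetical Region stand-in for the gitiles blame objects:

import collections

Region = collections.namedtuple('Region', ['start', 'count', 'revision'])

blame = [Region(start=10, count=5, revision='abc'),   # covers lines 10-14
         Region(start=30, count=2, revision='def')]   # covers lines 30-31
commits_in_roll = ['abc']
line_numbers = [12, 30, 99]

changed = [
    line for region in blame for line in line_numbers
    if (region.start <= line <= region.start + region.count - 1
        and region.revision in commits_in_roll)
]
# Line 30 was touched by 'def', which is not in the roll, so only 12 remains.
assert changed == [12]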
Example #20
def _GetCommitsBetweenRevisions(start_revision, end_revision):
    """Gets the revisions between start_revision and end_revision.

    Args:
      start_revision (str): The revision for which to get changes after. This
          revision is not included in the returned list.
      end_revision (str): The last revision in the range to return.

    Returns:
      A list of revisions sorted from oldest to newest.
    """
    repo = CachedGitilesRepository(HttpClientAppengine(), _CHROMIUM_REPO_URL)
    commits = repo.GetCommitsBetweenRevisions(start_revision, end_revision)
    commits.reverse()
    return commits
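GetCommitsBetweenRevisions appears to return commits newest-first, hence the
reverse() above. A toy illustration of the ordering contract:

commits = ['r3', 'r2', 'r1']  # newest first, as assumed from gitiles
commits.reverse()
assert commits == ['r1', 'r2', 'r3']  # oldest to newest, as documented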
Example #21
    def testDEPSDownloaderForChromeVersion(self):
        def _MockGet(*_):
            return 200, base64.b64encode('Dummy DEPS content')

        self.mock(HttpClientAppengine, '_Get', _MockGet)
        deps_downloader = chrome_dependency_fetcher.DEPSDownloader(
            gitiles_repository.GitilesRepository.Factory(
                HttpClientAppengine()))
        content = deps_downloader.Load('http://chrome-internal', '50.0.1234.0',
                                       'DEPS')
        self.assertEqual(content, 'Dummy DEPS content')

        self.assertRaisesRegexp(
            Exception,
            'Failed to pull DEPS file from http://chrome, at revision 50.0.1234.1.',
            deps_downloader.Load, 'http://chrome', '50.0.1234.1', 'DEPS')
Example #22
def GetCulpritInfo(repo_name, revision):
    """Returns culprit info of the given revision.

    Returns commit position, code-review url, host and change_id.
    """
    # TODO(stgao): get repo url at runtime based on the given repo name.
    # unused arg - pylint: disable=W0612,W0613
    repo = CachedGitilesRepository(
        HttpClientAppengine(),
        'https://chromium.googlesource.com/chromium/src.git')
    change_log = repo.GetChangeLog(revision)
    return {
        'commit_position': change_log.commit_position,
        'code_review_url': change_log.code_review_url,
        'review_server_host': change_log.review_server_host,
        'review_change_id': change_log.review_change_id
    }
Example #23
def GetComponentClassifierConfig(config, http_client=HttpClientAppengine()):
    """Get component mapping information from owners files and convert in
  Predator input format.

  The main purpose is to get the latest component/team information from
  OWNERS files and convert into predator mapping input format.

  Args:
    config(dict): Configuration of component classifier.

  Returns:
    A dict of {'component_info': data}, where data is a list of dict in the
    form {'component': component name.
          'dirs': a list of directories maps to this component.
          'team': the team owns this component.}.
    """
    component_dict = defaultdict(dict)
    # Mappings from OWNERS files.
    status_code, owner_mappings = http_client.Get(config['owner_mapping_url'])
    if status_code != 200:
        return None

    try:
        owner_mappings = json.loads(owner_mappings)
    except Exception:  # pragma: no cover
        logging.error(traceback.format_exc())
        return None

    for dir_name, component in owner_mappings['dir-to-component'].items():
        if component_dict.get(component) is None:
            component_dict[component]['component'] = component
            component_dict[component]['dirs'] = []
            if owner_mappings['component-to-team'].get(component):
                component_dict[component]['team'] = (
                    owner_mappings['component-to-team'].get(component))

        component_dict[component]['dirs'].append('src/' + dir_name)

    components = component_dict.values()

    component_classifier_config = {
        'component_info': components,
        'top_n': config['top_n'],
        'owner_mapping_url': config['owner_mapping_url']
    }
    return component_classifier_config
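The conversion loop above can be run standalone. A worked example with
hypothetical OWNERS mapping data, showing how two directories fold into one
component entry:

from collections import defaultdict

owner_mappings = {
    'dir-to-component': {
        'cc/tiles': 'Internals>Compositing',
        'cc/paint': 'Internals>Compositing',
    },
    'component-to-team': {
        'Internals>Compositing': 'graphics-dev@chromium.org',
    },
}

component_dict = defaultdict(dict)
for dir_name, component in owner_mappings['dir-to-component'].items():
  if component_dict.get(component) is None:
    component_dict[component]['component'] = component
    component_dict[component]['dirs'] = []
    if owner_mappings['component-to-team'].get(component):
      component_dict[component]['team'] = (
          owner_mappings['component-to-team'][component])
  component_dict[component]['dirs'].append('src/' + dir_name)

entry = component_dict['Internals>Compositing']
assert sorted(entry['dirs']) == ['src/cc/paint', 'src/cc/tiles']
assert entry['team'] == 'graphics-dev@chromium.org'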
Example #24
    def setUp(self):
        super(StepMapperTest, self).setUp()
        self.http_client = HttpClientAppengine()
        self.master_name = 'tryserver.m'
        self.wf_master_name = 'm'
        self.builder_name = 'b'
        self.build_number = 123
        self.step_name = 'browser_tests on platform'
        self.build_step = BuildStep.Create(self.master_name, self.builder_name,
                                           self.build_number, self.step_name,
                                           None)
        self.build_step.put()

        self.wf_build_step = BuildStep.Create(self.wf_master_name,
                                              self.builder_name,
                                              self.build_number,
                                              self.step_name, None)
        self.wf_build_step.put()
Example #25
    def testUpdateFirstFailureOnTestLevelFlaky(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223
        step_name = 'abc_test'
        failed_step = {
            'current_failure': 223,
            'first_failure': 221,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 223,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        step = WfStep.Create(master_name, builder_name, 222, step_name)
        step.isolated = True
        step.log_data = 'flaky'
        step.put()

        pipeline = DetectFirstFailurePipeline()
        pipeline._UpdateFirstFailureOnTestLevel(master_name, builder_name,
                                                build_number, step_name,
                                                failed_step,
                                                HttpClientAppengine())

        expected_failed_step = {
            'current_failure': 223,
            'first_failure': 223,
            'last_pass': 222,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 222,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        self.assertEqual(expected_failed_step, failed_step)
Example #26
    def _BotsAvailableForTask(self, step_metadata):
        """Check if there are available bots for this task's dimensions."""
        if not step_metadata:
            return False

        minimum_number_of_available_bots = (
            waterfall_config.GetSwarmingSettings().get(
                'minimum_number_of_available_bots', _MINIMUM_NUMBER_BOT))
        minimum_percentage_of_available_bots = (
            waterfall_config.GetSwarmingSettings().get(
                'minimum_percentage_of_available_bots', _MINIMUM_PERCENT_BOT))
        dimensions = step_metadata.get('dimensions')
        bot_counts = swarming_util.GetSwarmingBotCounts(
            dimensions, HttpClientAppengine())

        total_count = bot_counts.get('count') or -1
        available_count = bot_counts.get('available', 0)
        available_rate = float(available_count) / total_count

        return (available_count > minimum_number_of_available_bots
                and available_rate > minimum_percentage_of_available_bots)
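The availability check reduces to two thresholds. A worked example, assuming
defaults of 5 bots and a 10% available rate (the real values come from
waterfall_config and may differ):

_MINIMUM_NUMBER_BOT = 5     # assumed default
_MINIMUM_PERCENT_BOT = 0.1  # assumed default

bot_counts = {'count': 20, 'available': 8}
total_count = bot_counts.get('count') or -1
available_count = bot_counts.get('available', 0)
available_rate = float(available_count) / total_count  # 8 / 20 = 0.4

assert (available_count > _MINIMUM_NUMBER_BOT
        and available_rate > _MINIMUM_PERCENT_BOT)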
Example #27
class PullChangelogPipeline(BasePipeline):
    """A pipeline to pull change log of CLs."""

    # TODO: for files in dependencies(blink, v8, skia, etc), use blame first.
    GIT_REPO = CachedGitilesRepository(
        HttpClientAppengine(),
        'https://chromium.googlesource.com/chromium/src.git')

    # Arguments number differs from overridden method - pylint: disable=W0221
    def run(self, failure_info):
        """
        Args:
          failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

        Returns:
          A dict with the following form:
          {
            'git_hash_revision1': common.change_log.ChangeLog.ToDict(),
            ...
          }
        """
        change_logs = {}
        if not failure_info['failed'] or not failure_info['chromium_revision']:
            # Bail out if no failed step or no chromium revision.
            return change_logs

        # Bail out on infra failure
        if failure_info.get('failure_type') == failure_type.INFRA:
            return change_logs

        for build in failure_info.get('builds', {}).values():
            for revision in build['blame_list']:
                change_log = self.GIT_REPO.GetChangeLog(revision)
                if not change_log:  # pragma: no cover
                    raise pipeline.Retry('Failed to get change log for %s' %
                                         revision)

                change_logs[revision] = change_log.ToDict()

        return change_logs
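A sketch of the blame-list walk in run() above on hypothetical failure_info
data, showing which revisions would get their change logs pulled:

failure_info = {
    'failed': True,
    'chromium_revision': 'rev_c',
    'builds': {'123': {'blame_list': ['rev_a', 'rev_b', 'rev_c']}},
}
revisions = [rev for build in failure_info.get('builds', {}).values()
             for rev in build['blame_list']]
assert revisions == ['rev_a', 'rev_b', 'rev_c']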
Example #28
    def HandleGet(self):
        http_client = HttpClientAppengine()
        supported_masters = _GetSupportedMasters()
        main_waterfall_builders = _GetBuildersOnMasters(
            supported_masters, http_client)
        trybot_config = FinditConfig.Get().builders_to_trybots
        covered_builders = _GetCoveredBuilders(trybot_config)
        missing_builders = _GetDiffBetweenDicts(main_waterfall_builders,
                                                covered_builders)
        deprecated_builders = _GetDiffBetweenDicts(covered_builders,
                                                   main_waterfall_builders)
        unused_variable_builders = _GetUnusedVariableBuilders(
            trybot_config, http_client)

        return {
            'template': 'check_trybot_mapping.html',
            'data': {
                'missing': missing_builders,
                'deprecated': deprecated_builders,
                'unused_variable_builders': unused_variable_builders
            }
        }
Example #29
def NeedNewAnalysis(json_crash_data):
  """Checks if an analysis is needed for this crash.

  Args:
    json_crash_data (dict): Crash information from clients.

  Returns:
    True if a new analysis is needed; False otherwise.
  """
  if json_crash_data.get('redo'):
    logging.info('Force redo crash %s',
                 repr(json_crash_data['crash_identifiers']))
    return True

  # N.B., must call FinditForClientID indirectly, for mock testing.
  findit_client = crash_pipeline.FinditForClientID(
      json_crash_data['client_id'],
      CachedGitilesRepository.Factory(HttpClientAppengine()), CrashConfig.Get())
  crash_data = findit_client.GetCrashData(json_crash_data)
  # Detect the regression range, and decide if we actually need to
  # run a new analysis or not.
  return findit_client.NeedsNewAnalysis(crash_data)
Example #30
def _DetectDependencyRolls(change_logs, os_platform):
    """Detect DEPS rolls in the given CL change logs.

    Args:
      change_logs (dict): Output of pipeline PullChangelogPipeline.run().

    Returns:
      A dict in the following form:
      {
        'git_revision': [
          {
            'path': 'src/path/to/dependency/',
            'repo_url': 'https://url/to/dependency/repo.git',
            'new_revision': 'git_hash1',
            'old_revision': 'git_hash2',
          },
          ...
        ],
        ...
      }
    """
    deps_rolls = {}
    dep_fetcher = chrome_dependency_fetcher.ChromeDependencyFetcher(
        CachedGitilesRepository.Factory(HttpClientAppengine()))
    for revision, change_log in change_logs.iteritems():
        # Check DEPS roll only if the chromium DEPS file is changed by the CL.
        for touched_file in change_log['touched_files']:
            if touched_file['new_path'] == 'DEPS':
                # In git, r^ refers to the parent revision of r.
                old_revision = '%s^' % revision
                rolls = dep_fetcher.GetDependencyRolls(old_revision, revision,
                                                       os_platform)
                deps_rolls[revision] = [roll.ToDict() for roll in rolls]
                break

    return deps_rolls
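A sketch of the DEPS-touch check above on hypothetical change-log data; only
CLs that touch the chromium DEPS file trigger roll detection:

change_logs = {
    'abc123': {'touched_files': [{'new_path': 'DEPS'},
                                 {'new_path': 'src/a.cc'}]},
    'def456': {'touched_files': [{'new_path': 'src/b.cc'}]},
}
rolled = [rev for rev, log in change_logs.items()
          if any(f['new_path'] == 'DEPS' for f in log['touched_files'])]
assert rolled == ['abc123']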