Example #1
0
def get_revisions_list(bucket_path, testcase=None):
  """Returns a sorted ascending list of revisions from a bucket path, excluding
  bad build revisions and testcase crash revision (if any).

  Args:
    bucket_path: Build bucket path used to derive the revision pattern and to
        list build urls.
    testcase: Optional testcase; its crash_revision is never excluded even if
        it is marked as a bad build (bad-build marks can be flaky).

  Returns:
    Ascending list of integer revisions, or None if no build urls were found.
  """
  revision_pattern = revisions.revision_pattern_from_build_bucket_path(
      bucket_path)

  revision_urls = get_build_urls_list(bucket_path, reverse=False)
  if not revision_urls:
    return None

  # Parse the revisions out of the build urls.
  revision_list = []
  for url in revision_urls:
    match = re.match(revision_pattern, url)
    if match:
      revision = revisions.convert_revision_to_integer(match.group(1))
      revision_list.append(revision)

  # Collect bad-build revisions for this job into a set so the filter below is
  # a single O(n) pass instead of repeated O(n) list.remove() calls.
  job_type = environment.get_value('JOB_NAME')
  bad_builds = ndb_utils.get_all_from_query(
      data_types.BuildMetadata.query(
          ndb_utils.is_true(data_types.BuildMetadata.bad_build),
          data_types.BuildMetadata.job_type == job_type))

  bad_revisions = set()
  for bad_build in bad_builds:
    # Don't remove testcase revision even if it is in bad build list. This
    # usually happens when a bad bot sometimes marks a particular revision as
    # bad due to flakiness.
    if testcase and bad_build.revision == testcase.crash_revision:
      continue
    bad_revisions.add(bad_build.revision)

  return [revision for revision in revision_list
          if revision not in bad_revisions]
Example #2
0
def setup_trunk_build():
  """Sets up latest trunk build.

  Picks the newest release revision that also has a matching build in every
  configured symbolized bucket, then delegates to setup_regular_build().
  Returns None on any failure to enumerate builds or find a common revision.
  """
  release_build_bucket_path = environment.get_value('RELEASE_BUILD_BUCKET_PATH')
  sym_release_build_bucket_path = environment.get_value(
      'SYM_RELEASE_BUILD_BUCKET_PATH')
  sym_debug_build_bucket_path = environment.get_value(
      'SYM_DEBUG_BUILD_BUCKET_PATH')

  _setup_build_directories(_base_build_dir(release_build_bucket_path))

  release_build_urls = get_build_urls_list(release_build_bucket_path)
  if not release_build_urls:
    logs.log_error('Error getting list of release build urls from %s.' %
                   release_build_bucket_path)
    return None

  # Gather url lists for whichever symbolized buckets are configured. A
  # configured bucket that yields no urls is a hard failure.
  other_build_url_lists = []
  if sym_release_build_bucket_path:
    sym_release_urls = get_build_urls_list(sym_release_build_bucket_path)
    if not sym_release_urls:
      logs.log_error(
          'Error getting list of symbolized release build urls from %s.' %
          sym_release_build_bucket_path)
      return None
    other_build_url_lists.append(
        (sym_release_build_bucket_path, sym_release_urls))

  if sym_debug_build_bucket_path:
    sym_debug_urls = get_build_urls_list(sym_debug_build_bucket_path)
    if not sym_debug_urls:
      logs.log_error(
          'Error getting list of symbolized debug build urls from %s.' %
          sym_debug_build_bucket_path)
      return None
    other_build_url_lists.append((sym_debug_build_bucket_path, sym_debug_urls))

  # Scan release urls (in returned order) for the first revision present in
  # every symbolized bucket. Note: all() over an empty list is True, so no
  # configured symbolized buckets means the first parsed revision wins.
  revision_pattern = revisions.revision_pattern_from_build_bucket_path(
      release_build_bucket_path)
  revision = None
  for release_build_url in release_build_urls:
    match = re.match(revision_pattern, release_build_url)
    if not match:
      continue

    candidate = revisions.convert_revision_to_integer(match.group(1))
    if all(
        revisions.find_build_url(bucket, urls, candidate)
        for bucket, urls in other_build_url_lists):
      revision = candidate
      break

  if revision is None:
    logs.log_error('Unable to find a matching revision.')
    return None

  return setup_regular_build(revision)
def execute_task(fuzzer_name_and_revision, job_type):
    """Execute corpus pruning task."""
    # TODO(ochang): Remove this once remaining jobs in queue are all processed.
    # Older queue entries encode the revision as '<fuzzer>@<revision>'.
    if '@' in fuzzer_name_and_revision:
        full_fuzzer_name, revision_str = fuzzer_name_and_revision.split('@')
        revision = revisions.convert_revision_to_integer(revision_str)
    else:
        full_fuzzer_name = fuzzer_name_and_revision
        revision = 0

    fuzz_target = data_handler.get_fuzz_target(full_fuzzer_name)
    task_name = 'corpus_pruning_%s_%s' % (full_fuzzer_name, job_type)

    # Did the previous run of this task end in an error?
    last_execution_metadata = data_handler.get_task_status(task_name)
    last_execution_failed = (
        last_execution_metadata and
        last_execution_metadata.status == data_types.TaskState.ERROR)

    # Make sure we're the only instance running for the given fuzzer and
    # job_type.
    acquired = data_handler.update_task_status(task_name,
                                               data_types.TaskState.STARTED)
    if not acquired:
        logs.log('A previous corpus pruning task is still running, exiting.')
        return

    # Setup fuzzer and data bundle.
    if not setup.update_fuzzer_and_data_bundles(fuzz_target.engine):
        raise CorpusPruningException('Failed to set up fuzzer %s.' %
                                     fuzz_target.engine)

    use_minijail = environment.get_value('USE_MINIJAIL')

    # TODO(unassigned): Use coverage information for better selection here.
    cross_pollinate_fuzzers = _get_cross_pollinate_fuzzers(
        fuzz_target.engine, full_fuzzer_name)

    context = Context(fuzz_target, cross_pollinate_fuzzers, use_minijail)

    # Copy global blacklist into local suppressions file if LSan is enabled.
    if environment.get_value('LSAN'):
        # TODO(ochang): Copy this to untrusted worker.
        leak_blacklist.copy_global_to_local_blacklist()

    try:
        result = do_corpus_pruning(context, last_execution_failed, revision)
        _save_coverage_information(context, result)
        _process_corpus_crashes(context, result)
    except CorpusPruningException as e:
        logs.log_error('Corpus pruning failed: %s.' % str(e))
        data_handler.update_task_status(task_name, data_types.TaskState.ERROR)
        return
    finally:
        # Runs on success, error, and the early return above.
        context.cleanup()

    data_handler.update_task_status(task_name, data_types.TaskState.FINISHED)
Example #4
0
    def test_convert_revision_to_integer_version_string(self, _):
        """Test version string conversions in convert_revision_to_integer."""
        # See the full comment in convert_revision_to_integer, but we pad this
        # with zeros to allow sorting.
        self.assertEqual(
            revisions.convert_revision_to_integer('1.1.1.1'),
            1000010000100001)

        # Ensure that the max lengths for each part are supported.
        self.assertEqual(
            revisions.convert_revision_to_integer('12345.67890.12345.67890'),
            12345678901234567890)

        # Any individual part exceeding its maximum width, and junk strings,
        # must raise ValueError.
        invalid_version_strings = [
            '123456.12345.12345.12345',
            '12345.123456.12345.12345',
            '12345.12345.123456.12345',
            '12345.12345.12345.123456',
            '123junk',
            'junk',
            'junk123',
            '...',
        ]
        for invalid_version in invalid_version_strings:
            with self.assertRaises(ValueError):
                revisions.convert_revision_to_integer(invalid_version)
Example #5
0
 def test_convert_revision_to_integer_simple(self):
     """Test the simple revision case of convert_revision_to_integer."""
     # A plain numeric string converts directly to its integer value.
     self.assertEqual(revisions.convert_revision_to_integer('12345'), 12345)