Example #1
def get_all_from_query(query, **kwargs):
    """Return all entities based on the query by paging, to avoid query
  expirations on App Engine."""
    if isinstance(query, ndb_patcher.Query):
        # Not necessary with ndb_patcher.Query.
        for result in query.iter(**kwargs):
            yield result

        return

    batch_size = kwargs.pop('batch_size', DEFAULT_BATCH_SIZE)

    while True:
        entities, cursor, more = query.fetch_page(batch_size, **kwargs)
        if not entities:
            break

        for entity in entities:
            yield entity

        kwargs['start_cursor'] = cursor

        if not more:
            # No more results to process, bail out.
            break

        # Free up some memory in between batches.
        del entities
        utils.python_gc()
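
A minimal usage sketch (assuming the `data_types.Testcase` model from the other examples and a placeholder `process` callback; `DEFAULT_BATCH_SIZE` is defined in the same module):

# Hypothetical caller: stream entities in batches instead of loading the
# whole result set into memory at once. `process` is a placeholder for
# your own per-entity logic.
for testcase in get_all_from_query(
        data_types.Testcase.query(), batch_size=200):
    process(testcase)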
Example #2
def cleanup_task_state():
  """Cleans state before and after a task is executed."""
  # Cleanup stale processes.
  process_handler.cleanup_stale_processes()

  # Clear build urls, temp and testcase directories.
  shell.clear_build_urls_directory()
  shell.clear_crash_stacktraces_directory()
  shell.clear_testcase_directories()
  shell.clear_temp_directory()
  shell.clear_system_temp_directory()
  shell.clear_device_temp_directories()

  # Reset memory tool environment variables.
  environment.reset_current_memory_tool_options()

  # Call python's garbage collector.
  utils.python_gc()
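
A sketch of how a cleanup helper like this is typically used, assuming a hypothetical `execute_task` callable; the point is to reset state both before and after the work so that one task cannot leak into the next:

def run_task_with_cleanup(execute_task):
  """Hypothetical wrapper; `execute_task` is a placeholder callable."""
  cleanup_task_state()
  try:
    execute_task()
  finally:
    # Always clean up, even if the task raised.
    cleanup_task_state()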
Example #3
    def get(self):
        """Handle a get request."""
        try:
            grouper.group_testcases()
        except Exception:
            logs.log_error('Error occurred while grouping test cases.')
            return

        # Free up memory after group task run.
        utils.python_gc()

        # Get a list of jobs excluded from bug filing.
        excluded_jobs = _get_excluded_jobs()

        # Get a list of all jobs. This is used to filter testcases whose jobs have
        # been removed.
        all_jobs = data_handler.get_all_job_type_names()

        for testcase_id in data_handler.get_open_testcase_id_iterator():
            try:
                testcase = data_handler.get_testcase_by_id(testcase_id)
            except errors.InvalidTestcaseError:
                # Already deleted.
                continue

            # Skip if testcase's job is removed.
            if testcase.job_type not in all_jobs:
                continue

            # Skip if testcase's job is in exclusions list.
            if testcase.job_type in excluded_jobs:
                continue

            # Skip if we are running progression task at this time.
            if testcase.get_metadata('progression_pending'):
                continue

            # If the testcase has a bug filed already, no triage is needed.
            if _is_bug_filed(testcase):
                continue

            # Check if the crash is important, i.e. it is either a reproducible crash
            # or an unreproducible crash happening frequently.
            if not _is_crash_important(testcase):
                continue

            # Require that all tasks like minimization, regression testing,
            # etc. have finished.
            if not data_handler.critical_tasks_completed(testcase):
                continue

            # For testcases that are not part of a group, wait additional time
            # until the group task completes.
            # FIXME: In future, grouping might be dependent on the regression
            # range, so we would have to add an additional wait time.
            if not testcase.group_id and not dates.time_has_expired(
                    testcase.timestamp,
                    hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
                continue

            # If this project does not have an associated issue tracker, we cannot
            # file this crash anywhere.
            issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
                testcase)
            if not issue_tracker:
                continue

            # If there are similar issues to this test case already filed or recently
            # closed, skip filing a duplicate bug.
            if _check_and_update_similar_bug(testcase, issue_tracker):
                continue

            # Clean up old triage messages that are no longer applicable.
            testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

            # File the bug first and then create filed bug metadata.
            try:
                issue_filer.file_issue(testcase, issue_tracker)
            except Exception:
                logs.log_error('Failed to file issue for testcase %d.' %
                               testcase_id)
                continue

            _create_filed_bug_metadata(testcase)
            logs.log('Filed new issue %s for testcase %d.' %
                     (testcase.bug_information, testcase_id))
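
For context, a plausible sketch of a helper like `_is_bug_filed`, inferred from how it is used above (an assumption, not the verified implementation):

def _is_bug_filed(testcase):
    """Hypothetical sketch: treat a bug as already filed if the testcase
    carries issue information."""
    return bool(testcase.bug_information)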
Example #4
def _unpack_build(base_build_dir, build_dir, build_url, target_weights=None):
  """Unpacks a build from a build url into the build directory."""
  # Track time taken to unpack builds so that it doesn't silently regress.
  start_time = time.time()

  # Free up memory.
  utils.python_gc()

  # Remove the current build.
  logs.log('Removing build directory %s.' % build_dir)
  if not shell.remove_directory(build_dir, recreate=True):
    logs.log_error('Unable to clear build directory %s.' % build_dir)
    _handle_unrecoverable_error_on_windows()
    return False

  # Decide whether to use cached build archives.
  use_cache = environment.get_value('CACHE_STORE', False)

  # Download build archive locally.
  build_local_archive = os.path.join(build_dir, os.path.basename(build_url))

  # Make the disk space necessary for the archive available.
  archive_size = storage.get_download_file_size(
      build_url, build_local_archive, use_cache=True)
  if archive_size is not None and not _make_space(archive_size, base_build_dir):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for download. '
        'Cleared all data directories to free up space, exiting.')

  logs.log('Downloading build from url %s.' % build_url)
  try:
    storage.copy_file_from(build_url, build_local_archive, use_cache=use_cache)
  except Exception:
    logs.log_error('Unable to download build url %s.' % build_url)
    return False

  unpack_everything = environment.get_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES')
  if not unpack_everything:
    # For fuzzing, pick a random fuzz target so that we only un-archive that
    # particular fuzz target and its dependencies and save disk space.
    # If we are going to unpack everything in the archive based on
    # |UNPACK_ALL_FUZZ_TARGETS_AND_FILES| in the job definition, then don't
    # set a random fuzz target before we've unpacked the build. It won't
    # actually save us anything in this case and can be really expensive for
    # large builds (such as Chrome OS). Defer setting it until after the
    # build has been unpacked.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_archive(build_local_archive), target_weights)

  # The actual list of files to unpack can be smaller if we are only
  # unarchiving a particular fuzz target.
  file_match_callback = _get_file_match_callback()
  assert not (unpack_everything and file_match_callback is not None)

  if not _make_space_for_build(build_local_archive, base_build_dir,
                               file_match_callback):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for build. '
        'Cleared all data directories to free up space, exiting.')

  # Unpack the local build archive.
  logs.log('Unpacking build archive %s.' % build_local_archive)
  trusted = not utils.is_oss_fuzz()
  try:
    archive.unpack(
        build_local_archive,
        build_dir,
        trusted=trusted,
        file_match_callback=file_match_callback)
  except Exception:
    logs.log_error('Unable to unpack build archive %s.' % build_local_archive)
    return False

  if unpack_everything:
    # Set a random fuzz target now that the build has been unpacked, if we
    # didn't set one earlier.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_dir(build_dir), target_weights)

  # If this is a partial build due to selected build files, mark it as such
  # so that it is not re-used.
  if file_match_callback:
    partial_build_file_path = os.path.join(build_dir, PARTIAL_BUILD_FILE)
    utils.write_data_to_file('', partial_build_file_path)

  # No point in keeping the archive around.
  shell.remove_file(build_local_archive)

  end_time = time.time()
  elapsed_time = end_time - start_time
  log_func = logs.log_warn if elapsed_time > UNPACK_TIME_LIMIT else logs.log
  log_func('Build took %0.02f minutes to unpack.' % (elapsed_time / 60.))

  return True
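
A hypothetical call site (all paths and the URL below are illustrative placeholders):

# Hypothetical call site for _unpack_build.
base_build_dir = '/data/builds'
build_dir = os.path.join(base_build_dir, 'release')
build_url = 'gs://example-bucket/builds/release-1234.zip'
if not _unpack_build(base_build_dir, build_dir, build_url):
  logs.log_error('Failed to set up build, aborting task.')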
Example #5
def cleanup_testcases_and_issues():
  """Clean up unneeded open testcases and their associated issues."""
  jobs = data_handler.get_all_job_type_names()
  testcase_keys = ndb_utils.get_all_from_query(
      data_types.Testcase.query(
          ndb_utils.is_false(data_types.Testcase.triaged)),
      keys_only=True)
  top_crashes_by_project_and_platform_map = (
      get_top_crashes_for_all_projects_and_platforms())

  utils.python_gc()

  testcases_processed = 0
  empty_issue_tracker_policy = issue_tracker_policy.get_empty()
  for testcase_key in testcase_keys:
    testcase_id = testcase_key.id()
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    logs.log('Processing testcase %d.' % testcase_id)

    issue = issue_tracker_utils.get_issue_for_testcase(testcase)
    policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(testcase)
    if not policy:
      policy = empty_issue_tracker_policy

    # Issue updates.
    update_os_labels(policy, testcase, issue)
    update_fuzz_blocker_label(policy, testcase, issue,
                              top_crashes_by_project_and_platform_map)
    update_component_labels(testcase, issue)
    update_issue_ccs_from_owners_file(policy, testcase, issue)
    update_issue_owner_and_ccs_from_predator_results(policy, testcase, issue)
    update_issue_labels_for_flaky_testcase(policy, testcase, issue)

    # Testcase marking rules.
    mark_duplicate_testcase_as_closed_with_no_issue(testcase)
    mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
    mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
    mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
    mark_unreproducible_testcase_as_fixed_if_issue_is_closed(testcase, issue)
    mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
        policy, testcase, issue)

    # Notifications, to be done at the end after the testcase state has been
    # updated by the previous rules.
    notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
    notify_issue_if_testcase_is_invalid(policy, testcase, issue)
    notify_uploader_when_testcase_is_processed(policy, testcase, issue)

    # Mark testcase as triage complete if both testcase and associated issue
    # are closed. This also needs to be done before the deletion rules.
    mark_testcase_as_triaged_if_needed(testcase, issue)

    # Testcase deletion rules.
    delete_unreproducible_testcase_with_no_issue(testcase)

    testcases_processed += 1
    if testcases_processed % 100 == 0:
      utils.python_gc()
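
For reference, `ndb_utils.is_false` builds the boolean filter used above; a plausible one-line sketch (an assumption, not the verified source):

def is_false(boolean_prop):
  """Hypothetical sketch: filter for entities whose boolean property is
  False. The explicit `==` comparison is what builds an ndb filter node, so
  `is False` would not work here."""
  return boolean_prop == False  # pylint: disable=singleton-comparison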