Example #1
0
def cleanup_reports_metadata():
    """Delete ReportMetadata entities whose reports were already uploaded."""
    # Fetch only the keys of already-uploaded reports; the entities
    # themselves are not needed for deletion.
    query = data_types.ReportMetadata.query(
        ndb_utils.is_true(data_types.ReportMetadata.is_uploaded))
    uploaded_keys = ndb_utils.get_all_from_query(query, keys_only=True)
    ndb.delete_multi(uploaded_keys)
def update_mappings_for_fuzzer(fuzzer, mappings=None):
    """Clear existing mappings for a fuzzer, and replace them."""
    if mappings is None:
        mappings = fuzzer.jobs

    # Index the fuzzer's current FuzzerJob entities by (job, platform).
    existing = {}
    job_query = data_types.FuzzerJob.query()
    job_query = job_query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)
    for current in ndb_utils.get_all_from_query(job_query):
        existing[(current.job, current.platform)] = current

    to_create = []
    for job_name in mappings:
        job = data_types.Job.query(data_types.Job.name == job_name).get()
        if not job:
            logs.log_error('An unknown job %s was selected for fuzzer %s.' %
                           (job_name, fuzzer.name))
            continue

        # A mapping that already exists is kept as-is; pop it so that it is
        # not deleted below.
        if existing.pop((job_name, job.platform), None):
            continue

        new_mapping = data_types.FuzzerJob()
        new_mapping.fuzzer = fuzzer.name
        new_mapping.job = job_name
        new_mapping.platform = job.platform
        to_create.append(new_mapping)

    ndb.put_multi(to_create)
    # Whatever remains in `existing` is no longer selected; delete it.
    ndb.delete_multi([entity.key for entity in list(existing.values())])
def execute_task(*_):
    """Execute the report uploads.

    Finds all ReportMetadata entities not yet marked as uploaded, rebuilds
    the crash report from each metadata entry and uploads it. Metadata for
    testcases that no longer exist is deleted; successfully uploaded entries
    are marked `is_uploaded` so a later cleanup pass can remove them.
    Failed uploads are left untouched to be retried on the next run.
    """
    logs.log('Uploading pending reports.')

    # Get metadata for reports requiring upload.
    reports_metadata = ndb_utils.get_all_from_query(
        data_types.ReportMetadata.query(
            ndb_utils.is_false(data_types.ReportMetadata.is_uploaded)))
    reports_metadata = list(reports_metadata)
    if not reports_metadata:
        logs.log('No reports that need upload found.')
        return

    environment.set_value('UPLOAD_MODE', 'prod')

    # Otherwise, upload corresponding reports.
    logs.log('Uploading reports for testcases: %s' %
             str([report.testcase_id for report in reports_metadata]))

    report_metadata_to_delete = []
    for report_metadata in reports_metadata:
        # Convert metadata back into actual report.
        crash_info = crash_uploader.crash_report_info_from_metadata(
            report_metadata)
        testcase_id = report_metadata.testcase_id

        try:
            # Only existence matters here; the entity itself is unused.
            _ = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            # Testcase is gone; queue its metadata for deletion below.
            logs.log_warn('Could not find testcase %s.' % testcase_id)
            report_metadata_to_delete.append(report_metadata.key)
            continue

        # Upload the report and update the corresponding testcase info.
        logs.log('Processing testcase %s for crash upload.' % testcase_id)
        crash_report_id = crash_info.upload()
        if crash_report_id is None:
            # Leave metadata unmarked so the upload is retried next run.
            logs.log_error(
                'Crash upload for testcase %s failed, retry later.' %
                testcase_id)
            continue

        # Update the report metadata to indicate successful upload.
        report_metadata.crash_report_id = crash_report_id
        report_metadata.is_uploaded = True
        report_metadata.put()

        logs.log('Uploaded testcase %s to crash, got back report id %s.' %
                 (testcase_id, crash_report_id))
        # NOTE(review): presumably a rate-limit on the upload endpoint —
        # confirm before removing.
        time.sleep(1)

    # Delete report metadata entries where testcase does not exist anymore or
    # upload is not supported.
    if report_metadata_to_delete:
        ndb_utils.delete_multi(report_metadata_to_delete)

    # Log done with uploads.
    # Deletion happens in batches in cleanup_task, so that in case of error there
    # is some buffer for looking at stored ReportMetadata in the meantime.
    logs.log('Finished uploading crash reports.')
Example #4
0
def get_revisions_list(bucket_path, testcase=None):
  """Returns a sorted ascending list of revisions from a bucket path, excluding
  bad build revisions and testcase crash revision (if any)."""
  pattern = revisions.revision_pattern_from_build_bucket_path(bucket_path)

  build_urls = get_build_urls_list(bucket_path, reverse=False)
  if not build_urls:
    return None

  # Extract the numeric revision from every build url that matches.
  revision_list = []
  for build_url in build_urls:
    parsed = re.match(pattern, build_url)
    if not parsed:
      continue
    revision_list.append(
        revisions.convert_revision_to_integer(parsed.group(1)))

  # Drop revisions known to correspond to bad builds for this job type.
  job_type = environment.get_value('JOB_NAME')
  bad_builds = ndb_utils.get_all_from_query(
      data_types.BuildMetadata.query(
          ndb_utils.is_true(data_types.BuildMetadata.bad_build),
          data_types.BuildMetadata.job_type == job_type))
  for bad_build in bad_builds:
    # Keep the testcase's own crash revision even if flagged as bad; a flaky
    # bot occasionally mislabels a particular revision as bad.
    if testcase and bad_build.revision == testcase.crash_revision:
      continue

    if bad_build.revision in revision_list:
      revision_list.remove(bad_build.revision)

  return revision_list
def _query_and_upload_strategy_probabilities(engine):
  """Uploads queried data into datastore.

  Calls query functions and uploads query results
  to datastore to use as new probabilities. Probabilities
  are based on new_edges feature."""
  data = _query_multi_armed_bandit_probabilities(engine)
  logs.log('Queried distribution for {}.'.format(engine.name))

  # TODO(mukundv): Update once we choose a temperature parameter for final
  # implementation.
  strategy_data = []
  for row in data:
    entry = data_types.FuzzStrategyProbability()
    entry.strategy_name = str(row['strategy'])
    entry.probability = float(row['bandit_weight'])
    entry.engine = engine.name
    strategy_data.append(entry)

  # Replace all previous probabilities for this engine wholesale.
  old_query = data_types.FuzzStrategyProbability.query(
      data_types.FuzzStrategyProbability.engine == engine.name)
  old_keys = [entity.key for entity in ndb_utils.get_all_from_query(old_query)]
  ndb_utils.delete_multi(old_keys)
  ndb_utils.put_multi(strategy_data)
  logs.log('Uploaded queried distribution to ndb for {}'.format(engine.name))
  _store_probabilities_in_bigquery(engine, data)
  logs.log('Uploaded queried distribution to BigQuery for {}'.format(
      engine.name))
Example #6
0
def generate_weighted_strategy_pool():
  """Generate a strategy pool based on probability
  distribution from multi armed bandit experimentation."""
  distribution = list(
      ndb_utils.get_all_from_query(data_types.FuzzStrategyProbability.query()))

  # Fall back to the default random draw when bandit data is unavailable or
  # bandit-based selection is disabled.
  if not distribution or not environment.get_value(
      'USE_BANDIT_STRATEGY_SELECTION'):
    return generate_default_strategy_pool()

  selection = utils.random_weighted_choice(distribution, 'probability')
  chosen_names = selection.strategy_name.split(',')

  pool = StrategyPool()
  for candidate in strategy.strategy_list:
    if candidate.name in chosen_names:
      pool.add_strategy(candidate)

  # Mutator plugin is handled separately: only a small number of fuzz
  # targets support it, and those should use it heavily when available.
  if do_strategy(strategy.MUTATOR_PLUGIN_STRATEGY):
    pool.add_strategy(strategy.MUTATOR_PLUGIN_STRATEGY)

  return pool
def get_open_testcases_with_bugs():
  """Return iterator to open testcases with bugs."""
  query = data_types.Testcase.query(
      ndb_utils.is_true(data_types.Testcase.open),
      data_types.Testcase.status == 'Processed',
      data_types.Testcase.bug_information != '')  # pylint: disable=g-explicit-bool-comparison
  ordered = query.order(data_types.Testcase.bug_information,
                        data_types.Testcase.key)
  return ndb_utils.get_all_from_query(ordered)
Example #8
0
def get_testcase_ids_in_group(group_id):
  """Return the all testcase ids in the specified group."""
  # A group id must be a positive integer (0 and non-numeric are rejected).
  if not group_id or not str(group_id).isdigit():
    return []

  group_keys = ndb_utils.get_all_from_query(
      data_types.Testcase.query(data_types.Testcase.group_id == int(group_id)),
      keys_only=True)
  result = []
  for group_key in group_keys:
    result.append(group_key.id())
  return result
Example #9
0
def get_open_testcase_id_iterator():
  """Get an iterator for open testcase ids."""
  # Keys-only query in large batches: only the ids are yielded, never the
  # full entities.
  open_testcase_keys = ndb_utils.get_all_from_query(
      data_types.Testcase.query(
          ndb_utils.is_true(data_types.Testcase.open),
          data_types.Testcase.status == 'Processed'),
      keys_only=True,
      batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT)
  for open_testcase_key in open_testcase_keys:
    yield open_testcase_key.id()
Example #10
0
def cleanup_unused_heartbeats():
    """Clean up unused heartbeat entities."""
    # Heartbeats whose last beat predates the threshold are stale.
    expiry = utils.utcnow() - datetime.timedelta(
        days=UNUSED_HEARTBEAT_THRESHOLD)
    stale_keys = ndb_utils.get_all_from_query(
        data_types.Heartbeat.query(
            data_types.Heartbeat.last_beat_time < expiry),
        keys_only=True)

    ndb.delete_multi(stale_keys)
def update_platform_for_job(job_name, new_platform):
    """Update platform for all mappings for a particular job."""
    job_query = data_types.FuzzerJob.query(
        data_types.FuzzerJob.job == job_name)
    updated = []
    for mapping in ndb_utils.get_all_from_query(job_query):
        mapping.platform = new_platform
        updated.append(mapping)
    # Persist all modified mappings in one batch write.
    ndb.put_multi(updated)
Example #12
0
def cleanup_testcases_and_issues():
    """Clean up unneeded open testcases and their associated issues.

    Iterates over every untriaged testcase and applies, in order: issue
    label/CC updates, testcase open/closed marking rules, notifications,
    triage-complete marking, and finally deletion rules. The ordering of
    these rule groups is significant (noted inline below).
    """
    jobs = data_handler.get_all_job_type_names()
    testcase_keys = ndb_utils.get_all_from_query(data_types.Testcase.query(
        ndb_utils.is_false(data_types.Testcase.triaged)),
                                                 keys_only=True)
    top_crashes_by_project_and_platform_map = (
        get_top_crashes_for_all_projects_and_platforms())

    for testcase_key in testcase_keys:
        try:
            testcase = data_handler.get_testcase_by_id(testcase_key.id())
        except errors.InvalidTestcaseError:
            # Already deleted.
            continue

        issue = issue_tracker_utils.get_issue_for_testcase(testcase)
        policy = issue_tracker_utils.get_issue_tracker_policy_for_testcase(
            testcase)
        # Fall back to an empty policy so the rules below can run unguarded.
        if not policy:
            policy = issue_tracker_policy.get_empty()

        # Issue updates.
        update_os_labels(policy, testcase, issue)
        update_fuzz_blocker_label(policy, testcase, issue,
                                  top_crashes_by_project_and_platform_map)
        update_component_labels(testcase, issue)
        update_issue_ccs_from_owners_file(policy, testcase, issue)
        update_issue_owner_and_ccs_from_predator_results(
            policy, testcase, issue)
        update_issue_labels_for_flaky_testcase(policy, testcase, issue)

        # Testcase marking rules.
        mark_duplicate_testcase_as_closed_with_no_issue(testcase)
        mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue)
        mark_testcase_as_closed_if_issue_is_closed(policy, testcase, issue)
        mark_testcase_as_closed_if_job_is_invalid(testcase, jobs)
        mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
            testcase, issue)
        mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
            policy, testcase, issue)

        # Notification, to be done at end after testcase state is updated from
        # previous rules.
        notify_closed_issue_if_testcase_is_open(policy, testcase, issue)
        notify_issue_if_testcase_is_invalid(policy, testcase, issue)
        notify_uploader_when_testcase_is_processed(policy, testcase, issue)

        # Mark testcase as triage complete if both testcase and associated issue
        # are closed. This also need to be done before the deletion rules.
        mark_testcase_as_triaged_if_needed(testcase, issue)

        # Testcase deletion rules.
        delete_unreproducible_testcase_with_no_issue(testcase)
Example #13
0
def remove_unused_builds():
  """Remove any builds that are no longer in use by this bot.

  Scans the local builds directory, marks every build directory referenced
  by a job for this bot's platform group as in-use, and removes the rest.
  Throttled to run at most once per day via the persistent cache.
  """
  builds_directory = environment.get_value('BUILDS_DIR')
  last_checked_time = persistent_cache.get_value(
      LAST_UNUSED_BUILD_CHECK_KEY,
      constructor=datetime.datetime.utcfromtimestamp)
  if (last_checked_time is not None and
      not dates.time_has_expired(last_checked_time, days=1)):
    return

  # Initialize the map with all of our build directories.
  build_in_use_map = {}
  for build_directory in os.listdir(builds_directory):
    absolute_build_directory = os.path.join(builds_directory, build_directory)
    if os.path.isdir(absolute_build_directory):
      build_in_use_map[absolute_build_directory] = False

  # Platforms for jobs may come from the queue override, but use the default
  # if no override is present.
  job_platform = environment.get_platform_group()
  jobs_for_platform = ndb_utils.get_all_from_query(
      data_types.Job.query(data_types.Job.platform == job_platform))
  for job in jobs_for_platform:
    job_environment = job.get_environment()

    # Do not attempt to process any incomplete job definitions.
    if not job_environment:
      continue

    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for key, value in job_environment.items():
      if 'BUILD_BUCKET_PATH' in key:
        bucket_path = value
      elif key == 'CUSTOM_BINARY' and value != 'False':
        bucket_path = None
      else:
        continue

      # If we made it to this point, this build is potentially in use.
      build_directory = _get_build_directory(bucket_path, job.name)
      if build_directory in build_in_use_map:
        build_in_use_map[build_directory] = True

  # Same Python 3 fix as above.
  for build_directory, in_use in build_in_use_map.items():
    if in_use:
      continue

    # Remove the build.
    logs.log('Removing unused build directory: %s' % build_directory)
    shell.remove_directory(build_directory)

  persistent_cache.set_value(LAST_UNUSED_BUILD_CHECK_KEY, time.time())
Example #14
0
def find_testcase(project_name,
                  crash_type,
                  crash_state,
                  security_flag,
                  testcase_to_exclude=None):
  """Find an open test case matching certain parameters."""
  # Prepare the query.
  query = data_types.Testcase.query(
      data_types.Testcase.project_name == project_name,
      data_types.Testcase.crash_type == crash_type,
      data_types.Testcase.crash_state == crash_state,
      data_types.Testcase.security_flag == security_flag,
      data_types.Testcase.status == 'Processed',
      ndb_utils.is_true(data_types.Testcase.open))

  # Scan open (not fixed) test cases and keep the highest-quality one.
  best_testcase = None
  best_quality = -1
  for candidate in ndb_utils.get_all_from_query(query):
    if (testcase_to_exclude and
        candidate.key.id() == testcase_to_exclude.key.id()):
      continue
    if candidate.duplicate_of:
      continue

    # Quality is a bitmask combining, from most to least significant:
    # - Reproducibility (reproducible tests are preferred).
    # - Having an associated bug (point users to existing bugs).
    # - Being minimized (confirms reproducibility and improves usability).
    quality = 0
    if not candidate.one_time_crasher_flag:
      quality |= 2**2
    if candidate.bug_information:
      quality |= 2**1
    if candidate.minimized_keys:
      quality |= 2**0

    if quality > best_quality:
      best_testcase = candidate
      best_quality = quality

    if best_quality == MAX_TESTCASE_QUALITY:
      # Already found the best testcase possible, no more work to do. Bail out.
      break

  return best_testcase
Example #15
0
def get_fuzz_targets(engine=None, project=None, binary=None):
  """Return a Datastore query for fuzz targets.

  Args:
    engine: Optional engine name to filter on.
    project: Optional project name to filter on.
    binary: Optional binary name to filter on.

  Returns:
    An iterator over the matching FuzzTarget entities.
  """
  # Query the model class directly. The previous code instantiated a
  # throwaway FuzzTarget entity (`FuzzTarget().query()`) just to call the
  # classmethod, which is wasteful and inconsistent with every other query
  # in this file.
  query = data_types.FuzzTarget.query()

  if engine:
    query = query.filter(data_types.FuzzTarget.engine == engine)

  if project:
    query = query.filter(data_types.FuzzTarget.project == project)

  if binary:
    query = query.filter(data_types.FuzzTarget.binary == binary)

  return ndb_utils.get_all_from_query(query)
def get_fuzz_task_payload(platform=None):
    """Select a fuzzer that can run on this platform."""
    if not platform:
        # Prefer the queue override when set; otherwise use the current
        # environment's platform.
        queue_override = environment.get_value('QUEUE_OVERRIDE')
        platform = queue_override or environment.platform()

    platform_query = data_types.FuzzerJob.query().filter(
        data_types.FuzzerJob.platform == platform)
    candidates = list(ndb_utils.get_all_from_query(platform_query))
    if not candidates:
        return None, None

    choice = utils.random_weighted_choice(candidates)
    return choice.fuzzer, choice.job
Example #17
0
def get_similar_issues(testcase,
                       can=IssueTrackerManager.CAN_ALL,
                       issue_tracker_manager=None):
    """Get issue objects that seem to be related to a particular test case."""
    if not issue_tracker_manager:
        issue_tracker_manager = get_issue_tracker_manager(testcase)

    # Search the tracker directly for issues matching the testcase.
    search_text = get_similar_issues_query(testcase)
    issue_objects = issue_tracker_manager.get_issues(search_text, can=can)
    issue_ids = [issue.id for issue in issue_objects]

    if not testcase.group_id:
        return issue_objects

    # Also pull in issues filed against other testcases in the same group.
    group_query = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id)
    for group_member in ndb_utils.get_all_from_query(group_query):
        if not group_member.bug_information:
            continue

        # Skip issues already found through the search query above.
        issue_id = int(group_member.bug_information)
        if issue_id in issue_ids:
            continue

        # Resolve the issue object from its ID.
        issue = issue_tracker_manager.get_issue(issue_id)
        if not issue:
            continue

        # When the search criteria is open-bugs-only, exclude results where
        # either the issue or the testcase is closed.
        if (can == IssueTrackerManager.CAN_OPEN
                and (not issue.open or not testcase.open)):
            continue

        issue_objects.append(issue)
        issue_ids.append(issue_id)

    return issue_objects
def get_similar_issues(issue_tracker, testcase, only_open=True):
    """Get issue objects that seem to be related to a particular test case."""
    # Search the tracker using keywords derived from the testcase;
    # find_issues may return a falsy result, so normalize to a list.
    keywords = get_search_keywords(testcase)
    found = issue_tracker.find_issues(keywords=keywords, only_open=only_open)
    issues = list(found) if found else []

    issue_ids = [issue.id for issue in issues]

    if not testcase.group_id:
        return issues

    # Also include issues filed against other testcases in the same group.
    group_query = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id)
    for group_member in ndb_utils.get_all_from_query(group_query):
        if not group_member.bug_information:
            continue

        # Skip issues already found through the keyword search above.
        issue_id = int(group_member.bug_information)
        if issue_id in issue_ids:
            continue

        # Resolve the issue object from its ID.
        issue = issue_tracker.get_issue(issue_id)
        if not issue:
            continue

        # When restricted to open bugs, exclude results where either the
        # issue or the testcase is closed.
        if only_open and (not issue.is_open or not testcase.open):
            continue

        issues.append(issue)
        issue_ids.append(issue_id)

    return issues
def get_fuzz_target_jobs(fuzz_target_name=None,
                         engine=None,
                         job=None,
                         limit=None):
    """Return a Datastore query for fuzz target to job mappings."""
    # Apply each optional filter in turn.
    query = data_types.FuzzTargetJob.query()
    if fuzz_target_name:
        query = query.filter(
            data_types.FuzzTargetJob.fuzz_target_name == fuzz_target_name)
    if job:
        query = query.filter(data_types.FuzzTargetJob.job == job)
    if engine:
        query = query.filter(data_types.FuzzTargetJob.engine == engine)

    # With an explicit limit, use the raw iterator; otherwise go through the
    # batching helper.
    if limit is not None:
        return query.iter(limit=limit)
    return ndb_utils.get_all_from_query(query)
Example #20
0
def get_fuzz_task_payload(platform=None):
    """Select a fuzzer that can run on this platform.

    Args:
      platform: Optional platform name; defaults to the queue override or
        the current environment's platform.

    Returns:
      A (fuzzer, job) tuple from a weighted random choice, or (None, None)
      when no mapping matches the platform.
    """
    if not platform:
        queue_override = environment.get_value('QUEUE_OVERRIDE')
        platform = queue_override if queue_override else environment.platform()

    if environment.is_local_development():
        query = data_types.FuzzerJob.query()
        # Bug fix: this query is over FuzzerJob, but it previously filtered
        # on data_types.FuzzerJobs.platform (the batched model), so the
        # filter could never match the queried kind.
        query = query.filter(data_types.FuzzerJob.platform == platform)
        mappings = list(ndb_utils.get_all_from_query(query))
    else:
        # In production, mappings are stored batched inside FuzzerJobs
        # entities; flatten them out.
        query = data_types.FuzzerJobs.query()
        query = query.filter(data_types.FuzzerJobs.platform == platform)

        mappings = []
        for entity in query:
            mappings.extend(entity.fuzzer_jobs)

    if not mappings:
        return None, None

    selection = utils.random_weighted_choice(mappings,
                                             weight_attribute='actual_weight')
    return selection.fuzzer, selection.job
Example #21
0
    def post(self):
        """Handle a post request.

        Deletes the job identified by the request's integer key: removes the
        job from every fuzzer's jobs list, deletes all fuzzer-job mappings
        that reference it, deletes the job entity itself, then redirects to
        the jobs page.

        Raises:
            helpers.EarlyExitException: If no job exists for the given key
                (HTTP 400).
        """
        key = helpers.get_integer_key(self.request)
        job = ndb.Key(data_types.Job, key).get()
        if not job:
            raise helpers.EarlyExitException('Job not found.', 400)

        # Delete from fuzzers' jobs' list.
        for fuzzer in ndb_utils.get_all_from_model(data_types.Fuzzer):
            if job.name in fuzzer.jobs:
                fuzzer.jobs.remove(job.name)
                fuzzer.put()

        # Delete associated fuzzer-job mapping(s).
        query = data_types.FuzzerJob.query()
        query = query.filter(data_types.FuzzerJob.job == job.name)
        for mapping in ndb_utils.get_all_from_query(query):
            mapping.key.delete()

        # Delete job.
        job.key.delete()

        # Record the destructive operation in the audit log.
        helpers.log('Deleted job %s' % job.name, helpers.MODIFY_OPERATION)
        self.redirect('/jobs')
Example #22
0
def _check_and_update_similar_bug(testcase, issue_tracker):
    """Get list of similar open issues and ones that were recently closed.

    Scans testcases from the same group and testcases with identical crash
    parameters, and returns True when an existing similar bug means a new
    bug should NOT be filed yet (similar issue still open/unverified,
    blacklisted via the ignore label, or only recently closed). Returns
    False when filing can proceed. May attach triage messages to the
    testcase as a side effect.
    """
    # Get similar testcases from the same group.
    similar_testcases_from_group = []
    if testcase.group_id:
        group_query = data_types.Testcase.query(
            data_types.Testcase.group_id == testcase.group_id)
        similar_testcases_from_group = ndb_utils.get_all_from_query(
            group_query,
            batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)

    # Get testcases with the same crash params. These might not be in the a group
    # if they were just fixed.
    same_crash_params_query = data_types.Testcase.query(
        data_types.Testcase.crash_type == testcase.crash_type,
        data_types.Testcase.crash_state == testcase.crash_state,
        data_types.Testcase.security_flag == testcase.security_flag,
        data_types.Testcase.project_name == testcase.project_name,
        data_types.Testcase.status == 'Processed')

    similar_testcases_from_query = ndb_utils.get_all_from_query(
        same_crash_params_query,
        batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)
    for similar_testcase in itertools.chain(similar_testcases_from_group,
                                            similar_testcases_from_query):
        # Exclude ourself from comparison.
        if similar_testcase.key.id() == testcase.key.id():
            continue

        # Exclude similar testcases without bug information.
        if not similar_testcase.bug_information:
            continue

        # Get the issue object given its ID.
        issue = issue_tracker.get_issue(similar_testcase.bug_information)
        if not issue:
            continue

        # If the reproducible issue is not verified yet, bug is still valid and
        # might be caused by non-availability of latest builds. In that case,
        # don't file a new bug yet.
        if similar_testcase.open and not similar_testcase.one_time_crasher_flag:
            return True

        # If the issue is still open, no need to file a duplicate bug.
        if issue.is_open:
            return True

        # If the issue indicates that this crash needs to be ignored, no need to
        # file another one.
        policy = issue_tracker_policy.get(issue_tracker.project)
        ignore_label = policy.label('ignore')
        if ignore_label in issue.labels:
            _add_triage_message(testcase, (
                'Skipping filing a bug since similar testcase ({testcase_id}) in '
                'issue ({issue_id}) is blacklisted with {ignore_label} label.'
            ).format(testcase_id=similar_testcase.key.id(),
                     issue_id=issue.id,
                     ignore_label=ignore_label))
            return True

        # If the issue is recently closed, wait certain time period to make sure
        # our fixed verification has completed.
        if (issue.closed_time
                and not dates.time_has_expired(
                    issue.closed_time,
                    hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED)):
            _add_triage_message(
                testcase,
                ('Delaying filing a bug since similar testcase '
                 '({testcase_id}) in issue ({issue_id}) was just fixed.'
                 ).format(testcase_id=similar_testcase.key.id(),
                          issue_id=issue.id))
            return True

    return False
Example #23
0
def update_fuzzer_and_data_bundles(fuzzer_name):
    """Update the fuzzer with a given name if necessary.

    Sets up fuzzer-related environment variables, downloads and unpacks a
    new fuzzer archive when the stored revision is newer than the local
    one, and updates the fuzzer's associated data bundles.

    Args:
        fuzzer_name: Name of the Fuzzer entity to set up.

    Returns:
        True on success, False if any download/unpack/bundle step failed.

    Raises:
        errors.InvalidFuzzerError: If no fuzzer exists with the given name.
    """
    fuzzer = data_types.Fuzzer.query(
        data_types.Fuzzer.name == fuzzer_name).get()
    if not fuzzer:
        logs.log_error('No fuzzer exists with name %s.' % fuzzer_name)
        raise errors.InvalidFuzzerError

    # Set some helper environment variables.
    fuzzer_directory = get_fuzzer_directory(fuzzer_name)
    environment.set_value('FUZZER_DIR', fuzzer_directory)
    environment.set_value('UNTRUSTED_CONTENT', fuzzer.untrusted_content)

    # Adjust the test timeout, if user has provided one.
    if fuzzer.timeout:
        environment.set_value('TEST_TIMEOUT', fuzzer.timeout)

        # Increase fuzz test timeout if the fuzzer timeout is higher than its
        # current value.
        fuzz_test_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')
        if fuzz_test_timeout and fuzz_test_timeout < fuzzer.timeout:
            environment.set_value('FUZZ_TEST_TIMEOUT', fuzzer.timeout)

    # Adjust the max testcases if this fuzzer has specified a lower limit.
    max_testcases = environment.get_value('MAX_TESTCASES')
    if fuzzer.max_testcases and fuzzer.max_testcases < max_testcases:
        environment.set_value('MAX_TESTCASES', fuzzer.max_testcases)

    # Check for updates to this fuzzer. Builtin fuzzers never need updating;
    # others are compared against the revision recorded in the version file.
    version_file = os.path.join(fuzzer_directory, '.%s_version' % fuzzer_name)
    if (not fuzzer.builtin
            and revisions.needs_update(version_file, fuzzer.revision)):
        logs.log('Fuzzer update was found, updating.')

        # Clear the old fuzzer directory if it exists.
        if not shell.remove_directory(fuzzer_directory, recreate=True):
            logs.log_error('Failed to clear fuzzer directory.')
            return False

        # Copy the archive to local disk and unpack it.
        archive_path = os.path.join(fuzzer_directory, fuzzer.filename)
        if not blobs.read_blob_to_disk(fuzzer.blobstore_key, archive_path):
            logs.log_error('Failed to copy fuzzer archive.')
            return False

        try:
            archive.unpack(archive_path, fuzzer_directory)
        except Exception:
            error_message = (
                'Failed to unpack fuzzer archive %s '
                '(bad archive or unsupported format).') % fuzzer.filename
            logs.log_error(error_message)
            fuzzer_logs.upload_script_log('Fatal error: ' + error_message,
                                          fuzzer_name=fuzzer_name)
            return False

        fuzzer_path = os.path.join(fuzzer_directory, fuzzer.executable_path)
        if not os.path.exists(fuzzer_path):
            error_message = (
                'Fuzzer executable %s not found. '
                'Check fuzzer configuration.') % fuzzer.executable_path
            logs.log_error(error_message)
            fuzzer_logs.upload_script_log('Fatal error: ' + error_message,
                                          fuzzer_name=fuzzer_name)
            return False

        # Make fuzzer executable (rwxr-x---).
        os.chmod(fuzzer_path, 0o750)

        # Cleanup unneeded archive.
        shell.remove_file(archive_path)

        # Save the current revision of this fuzzer in a file for later checks.
        revisions.write_revision_to_revision_file(version_file,
                                                  fuzzer.revision)
        logs.log('Updated fuzzer to revision %d.' % fuzzer.revision)

    # Setup data bundles associated with this fuzzer.
    data_bundles = ndb_utils.get_all_from_query(
        data_types.DataBundle.query(
            data_types.DataBundle.name == fuzzer.data_bundle_name))
    for data_bundle in data_bundles:
        if not update_data_bundle(fuzzer, data_bundle):
            return False

    # Setup environment variable for launcher script path.
    if fuzzer.launcher_script:
        fuzzer_launcher_path = shell.get_execute_command(
            os.path.join(fuzzer_directory, fuzzer.launcher_script))
        environment.set_value('LAUNCHER_PATH', fuzzer_launcher_path)

    return True
Example #24
0
def is_similar_bug_open_or_recently_closed(testcase, issue_tracker_manager):
    """Check whether a similar bug is open, ignored, or too recently closed.

    Args:
        testcase: Testcase entity to compare against.
        issue_tracker_manager: Manager used to fetch issues by bug ID.

    Returns:
        True if a new bug should NOT be filed (a similar testcase is still
        open, its issue is open/ignored, or its issue closed too recently);
        False otherwise.
    """
    # Get similar testcases from the same group.
    similar_testcases_from_group = []
    if testcase.group_id:
        group_query = data_types.Testcase.query(
            data_types.Testcase.group_id == testcase.group_id)
        # Use integer division: a float batch_size would be rejected by the
        # datastore query API (Python 3 `/` yields a float).
        similar_testcases_from_group = ndb_utils.get_all_from_query(
            group_query,
            batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)

    # Get testcases with the same crash params. These might not be in a group
    # if they were just fixed.
    same_crash_params_query = data_types.Testcase.query(
        data_types.Testcase.crash_type == testcase.crash_type,
        data_types.Testcase.crash_state == testcase.crash_state,
        data_types.Testcase.security_flag == testcase.security_flag,
        data_types.Testcase.project_name == testcase.project_name,
        data_types.Testcase.status == 'Processed')

    similar_testcases_from_query = ndb_utils.get_all_from_query(
        same_crash_params_query,
        batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)

    for similar_testcase in itertools.chain(similar_testcases_from_group,
                                            similar_testcases_from_query):
        # Exclude ourself from comparison.
        if similar_testcase.key.id() == testcase.key.id():
            continue

        # Exclude similar testcases without bug information.
        if not similar_testcase.bug_information:
            continue

        # Get the issue object given its ID.
        issue = issue_tracker_manager.get_issue(
            similar_testcase.bug_information)
        if not issue:
            continue

        # If the reproducible issue is not verified yet, bug is still valid and
        # might be caused by non-availability of latest builds. In that case,
        # don't file a new bug yet.
        if similar_testcase.open and not similar_testcase.one_time_crasher_flag:
            return True

        # If the issue is still open, no need to file a duplicate bug.
        if issue.open:
            return True

        # If the issue indicates that this crash needs to be ignored, no need to
        # file another one.
        if issue.has_label(data_types.ISSUE_IGNORE_LABEL):
            return True

        # If the issue is recently closed, wait certain time period to make sure
        # our fixed verification has completed.
        if (issue.closed and not dates.time_has_expired(
                issue.closed,
                compare_to=datetime.datetime.utcnow(),
                hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED)):
            return True

    return False
Exemple #25
0
def _get_job_list_for_fuzzer(fuzzer):
    """Return the job names mapped to the given fuzzer as a list.

    Args:
        fuzzer: Fuzzer entity whose job mappings are looked up.

    Returns:
        List of job name strings from matching FuzzerJob entities.
    """
    query = data_types.FuzzerJob.query()
    # NDB's Query.filter returns a NEW query and leaves the original
    # unmodified; the result must be reassigned, otherwise this would
    # return jobs for every fuzzer, not just `fuzzer`.
    query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)
    return [m.job for m in ndb_utils.get_all_from_query(query)]
Exemple #26
0
def redo_testcase(testcase, tasks, user_email):
    """Re-run the selected tasks for a testcase.

    Resets the relevant testcase fields and metadata for each requested
    task, records a comment, clears pending notifications, and schedules
    the task(s) on the testcase's queue.

    Args:
        testcase: Testcase entity to reset and reschedule.
        tasks: Iterable of task names; each must be in VALID_REDO_TASKS.
        user_email: Email of the requesting user, recorded in the comment.

    Raises:
        InvalidRedoTask: If any requested task is not a valid redo task.
    """
    for task in tasks:
        if task not in VALID_REDO_TASKS:
            raise InvalidRedoTask(task)

    minimize, regression, progression, impact, blame = (
        name in tasks
        for name in ('minimize', 'regression', 'progression', 'impact',
                     'blame'))

    scheduled = []
    testcase_id = testcase.key.id()

    # Metadata keys to drop, depending on which redo tasks were selected.
    stale_metadata_keys = ['potentially_flaky']

    if minimize:
        scheduled.append('minimize')
        testcase.minimized_keys = ''
        testcase.set_metadata('redo_minimize', True, update_testcase=False)
        stale_metadata_keys.extend([
            'env', 'current_minimization_phase_attempts', 'minimization_phase'
        ])

        # If this testcase was archived during minimization, update the state.
        testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED

    if regression:
        scheduled.append('regression')
        testcase.regression = ''
        stale_metadata_keys.extend(
            ['last_regression_min', 'last_regression_max'])

    if progression:
        scheduled.append('progression')
        testcase.fixed = ''
        testcase.open = True
        testcase.last_tested_crash_stacktrace = None
        testcase.triaged = False
        testcase.set_metadata(
            'progression_pending', True, update_testcase=False)
        stale_metadata_keys.extend([
            'last_progression_min', 'last_progression_max',
            'last_tested_revision'
        ])

    if impact:
        scheduled.append('impact')
        testcase.is_impact_set_flag = False

    if blame:
        scheduled.append('blame')
        testcase.set_metadata('blame_pending', True, update_testcase=False)
        testcase.set_metadata('predator_result', None, update_testcase=False)

    for stale_key in stale_metadata_keys:
        testcase.delete_metadata(stale_key, update_testcase=False)

    testcase.comments += '[{}] {}: Redo task(s): {}\n'.format(
        utils.current_date_time(), user_email, ', '.join(sorted(scheduled)))
    testcase.one_time_crasher_flag = False
    testcase.put()

    # Allow new notifications to be sent for this testcase.
    notification_keys = ndb_utils.get_all_from_query(
        data_types.Notification.query(
            data_types.Notification.testcase_id == testcase.key.id()),
        keys_only=True)
    ndb_utils.delete_multi(notification_keys)

    # Minimization automatically kicks off the other tasks when it finishes,
    # so in that case schedule it alone.
    if minimize:
        add_task('minimize', testcase_id, testcase.job_type,
                 queue_for_testcase(testcase))
        return

    for task_name, selected in (('regression', regression),
                                ('progression', progression),
                                ('impact', impact),
                                ('blame', blame)):
        if selected:
            add_task(task_name, testcase_id, testcase.job_type,
                     queue_for_testcase(testcase))
Exemple #27
0
def get_fuzzing_engines():
    """Return the distinct engine names across current fuzz targets."""
    engine_query = data_types.FuzzTarget.query(
        projection=[data_types.FuzzTarget.engine], distinct=True)
    targets = ndb_utils.get_all_from_query(engine_query)
    return [target.engine for target in targets]