Example #1
def execute_task(*_):
    """Execute the report uploads."""
    logs.log('Uploading pending reports.')

    # Get metadata for reports requiring upload.
    reports_metadata = ndb_utils.get_all_from_query(
        data_types.ReportMetadata.query(
            ndb_utils.is_false(data_types.ReportMetadata.is_uploaded)))
    reports_metadata = list(reports_metadata)
    if not reports_metadata:
        logs.log('No reports pending upload.')
        return

    environment.set_value('UPLOAD_MODE', 'prod')

    # Otherwise, upload corresponding reports.
    logs.log('Uploading reports for testcases: %s' %
             str([report.testcase_id for report in reports_metadata]))

    report_metadata_to_delete = []
    for report_metadata in reports_metadata:
        # Convert metadata back into actual report.
        crash_info = crash_uploader.crash_report_info_from_metadata(
            report_metadata)
        testcase_id = report_metadata.testcase_id

        try:
            _ = data_handler.get_testcase_by_id(testcase_id)
        except errors.InvalidTestcaseError:
            logs.log_warn('Could not find testcase %s.' % testcase_id)
            report_metadata_to_delete.append(report_metadata.key)
            continue

        # Upload the report and update the corresponding testcase info.
        logs.log('Processing testcase %s for crash upload.' % testcase_id)
        crash_report_id = crash_info.upload()
        if crash_report_id is None:
            logs.log_error(
                'Crash upload for testcase %s failed, retry later.' %
                testcase_id)
            continue

        # Update the report metadata to indicate successful upload.
        report_metadata.crash_report_id = crash_report_id
        report_metadata.is_uploaded = True
        report_metadata.put()

        logs.log('Uploaded testcase %s to crash, got back report id %s.' %
                 (testcase_id, crash_report_id))
        time.sleep(1)

    # Delete report metadata entries where the testcase no longer exists or
    # upload is not supported.
    if report_metadata_to_delete:
        ndb_utils.delete_multi(report_metadata_to_delete)

    # Deletion happens in batches in cleanup_task, so that in case of error
    # there is some buffer for looking at stored ReportMetadata in the
    # meantime.
    logs.log('Finished uploading crash reports.')
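
Every example in this set filters an NDB boolean property through an ndb_utils helper rather than plain `not`, because NDB builds query filters by overloading comparison operators on model properties. Below is a minimal sketch of what such helpers could look like; this is an assumption for illustration, not necessarily the actual ndb_utils implementation.

def is_false(boolean_property):
    """Return a query filter matching entities where the property is False."""
    # NDB overloads == to build a filter node, so the singleton comparison is
    # deliberate and must not be rewritten as `not boolean_property`.
    return boolean_property == False  # pylint: disable=singleton-comparison

def is_true(boolean_property):
    """Return a query filter matching entities where the property is True."""
    return boolean_property == True  # pylint: disable=singleton-comparison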
Example #2
def _allowed_users_for_entity(name, entity_kind, auto_cc=None):
    """Return a list of users that have permissions for the given entity.

  Args:
    name: The name of the entity.
    entity_kind: The type (data_types.PermissionEntityKind) of the entity.
    auto_cc: The Auto CC type (data_types.AutoCCType) to filter on, or None.

  Returns:
    A list of user emails that have permission to access the given entity.
  """
    if not name:
        return []

    # Easy case: direct matches.
    direct_match_permissions = data_types.ExternalUserPermission.query(
        data_types.ExternalUserPermission.entity_kind == entity_kind,
        data_types.ExternalUserPermission.entity_name == name,
        ndb_utils.is_false(data_types.ExternalUserPermission.is_prefix),
        projection=[data_types.ExternalUserPermission.email])
    if auto_cc is not None:
        direct_match_permissions = direct_match_permissions.filter(
            data_types.ExternalUserPermission.auto_cc == auto_cc)

    allowed_users = [
        permission.email for permission in direct_match_permissions
    ]

    # Find all permissions where the prefix matches the entity name.
    # Unfortunately, Datastore doesn't give us an easy way of doing so. To
    # iterate through a smaller set than every single permission, get all
    # permissions whose prefix string is <= the actual entity name and >= its
    # first character.
    prefix_match_permissions = data_types.ExternalUserPermission.query(
        data_types.ExternalUserPermission.entity_kind == entity_kind,
        data_types.ExternalUserPermission.entity_name <= name,
        data_types.ExternalUserPermission.entity_name >= name[0],
        ndb_utils.is_true(data_types.ExternalUserPermission.is_prefix),
        projection=[
            data_types.ExternalUserPermission.email,
            data_types.ExternalUserPermission.entity_name
        ])
    if auto_cc is not None:
        prefix_match_permissions = prefix_match_permissions.filter(
            data_types.ExternalUserPermission.auto_cc == auto_cc)

    for permission in prefix_match_permissions:
        if not permission.entity_name:
            # No external user should have an empty prefix (access to all
            # fuzzers/jobs).
            continue

        if name.startswith(permission.entity_name):
            allowed_users.append(permission.email)

    return sorted(allowed_users)
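
The range filter is what makes the prefix scan cheap: every true prefix of name sorts lexicographically <= name and >= its first character, so the query fetches a small candidate set and the startswith() check then keeps only exact prefixes. A hypothetical call is sketched below; the FUZZER entity kind and ALL auto-CC constants are assumptions, not verified names.

# Sketch: collect users auto-CC'd for a fuzzer (all names are made up).
cc_emails = _allowed_users_for_entity(
    'libFuzzer_example_fuzzer',
    data_types.PermissionEntityKind.FUZZER,
    auto_cc=data_types.AutoCCType.ALL)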
Example #3
def query_testcase(project_name, crash_type, crash_state, security_flag,
                   is_open):
    """Start a query for an associated testcase."""
    return data_types.Testcase.query(
        data_types.Testcase.project_name == project_name,
        data_types.Testcase.crash_type == crash_type,
        data_types.Testcase.crash_state == crash_state,
        data_types.Testcase.security_flag == security_flag,
        data_types.Testcase.open == is_open,
        ndb_utils.is_false(data_types.Testcase.is_a_duplicate_flag)).order(
            -data_types.Testcase.timestamp).iter(
                limit=1,
                projection=[
                    'bug_information',
                    'group_bug_information',
                ])
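
The iterator is capped at the single most recent match and projects only the two bug fields, so a caller touches at most one small partial entity. A consuming sketch, with made-up argument values:

# crash_state would normally come from an incoming crash report.
crash_state = 'frame_one\nframe_two\nframe_three'
for testcase in query_testcase('example_project', 'Heap-buffer-overflow',
                               crash_state, security_flag=True, is_open=True):
    existing_bug_id = (testcase.bug_information or
                       testcase.group_bug_information)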
Example #4
    def get(self):
        """Handle a GET request."""
        assert self.task

        # Create new tasks for the open reproducible test cases.
        for status in ['Processed', 'Duplicate']:
            testcases = data_types.Testcase.query(
                ndb_utils.is_true(data_types.Testcase.open),
                ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag),
                data_types.Testcase.status == status)

            for testcase in testcases:
                try:
                    tasks.add_task(self.task,
                                   testcase.key.id(),
                                   testcase.job_type,
                                   queue=tasks.queue_for_testcase(testcase))
                except Exception:
                    logs.log_error('Failed to add task.')
                    continue
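
The handler reads the task type to enqueue from self.task, so a concrete cron endpoint only needs to pin that attribute. A hypothetical subclass (both the class name and the Handler base name here are assumptions):

class OpenTestcaseTasksScheduler(Handler):
    """Cron handler that re-enqueues one task type for open reproducible cases."""
    task = 'progression'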
Example #5
def execute(args):
    """Query Testcases of the given projects,
    and conditionally file them to the corresponding GitHub repo."""
    if not args.script_args:
        print('Need at least one project name (with -p) '
              'when running the backfiler.')
        return
    for project_name in args.script_args:
        print(f'Backfilling project {project_name}')
        for testcase in data_types.Testcase.query(
                ndb_utils.is_true(data_types.Testcase.open),
                ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag),
                data_types.Testcase.status == 'Processed',
                data_types.Testcase.project_name == project_name,
        ):
            if not testcase.bug_information:
                print(f'Skipping testcase without a bug: {testcase.key.id()}')
                continue
            print(f'Backfilling testcase id: {testcase.key.id()}')
            if args.non_dry_run:
                oss_fuzz_github.file_issue(testcase)
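
Since execute only reads script_args and non_dry_run from its argument, a dry run can be exercised with a stand-in namespace; the project name below is made up.

import argparse

# Dry run: prints which testcases would be filed without touching GitHub.
execute(argparse.Namespace(script_args=['example_project'], non_dry_run=False))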
Example #6
def _is_crash_important(testcase):
  """Indicate if the crash is important to file."""
  if not testcase.one_time_crasher_flag:
    # A reproducible crash is an important crash.
    return True

  if testcase.status != 'Processed':
    # A duplicate or unreproducible crash is not an important crash.
    return False

  # Testcase is unreproducible. Only crashes that occur frequently are
  # important.

  if testcase.crash_type in UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES:
    return False

  # Ensure that there is no reproducible testcase in our group.
  if testcase.group_id:
    other_reproducible_testcase = data_types.Testcase.query(
        data_types.Testcase.group_id == testcase.group_id,
        ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
    if other_reproducible_testcase:
      # There is another reproducible testcase in our group. So, this crash is
      # not important.
      return False

  # Get crash statistics data on this unreproducible crash for last X days.
  last_hour = crash_stats.get_last_successful_hour()
  if not last_hour:
    # No crash stats available, skip.
    return False

  _, rows = crash_stats.get(
      end=last_hour,
      block='day',
      days=data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE,
      group_by='reproducible_flag',
      where_clause=(
          'crash_type = %s AND crash_state = %s AND security_flag = %s' %
          (json.dumps(testcase.crash_type), json.dumps(testcase.crash_state),
           json.dumps(testcase.security_flag))),
      group_having_clause='',
      sort_by='total_count',
      offset=0,
      limit=1)

  # Calculate total crash count and crash days count.
  crash_days_indices = set()
  total_crash_count = 0
  for row in rows:
    if 'groups' not in row:
      continue

    total_crash_count += row['totalCount']
    for group in row['groups']:
      for index in group['indices']:
        crash_days_indices.add(index['hour'])

  crash_days_count = len(crash_days_indices)

  # An unreproducible testcase is important only if it crashed at least once
  # every day for the last X days and its total crash count exceeded our
  # threshold.
  return (crash_days_count ==
          data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE and
          total_crash_count >=
          data_types.FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD)
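
To make the final aggregation concrete, here is a self-contained sketch with made-up rows in the shape the loop above expects: crashes on two distinct days totalling 120, plus one row without 'groups' that gets skipped.

rows = [
    {'totalCount': 120,
     'groups': [{'indices': [{'hour': 451200}, {'hour': 451224}]}]},
    {'totalCount': 30},  # no 'groups' key, so the loop ignores it
]

crash_days_indices = set()
total_crash_count = 0
for row in rows:
  if 'groups' not in row:
    continue
  total_crash_count += row['totalCount']
  for group in row['groups']:
    for index in group['indices']:
      crash_days_indices.add(index['hour'])

assert total_crash_count == 120
assert len(crash_days_indices) == 2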