# Example 1
def update_testcase_comment(testcase, task_state, message=None):
  """Append a task status entry to the testcase's comment field.

  Records the current bot, task name, state and timestamp, persists the
  testcase, and optionally logs the message afterwards. For tasks whose old
  entries would otherwise accumulate (blame, progression, impact), prior
  entries for the same task are removed first.
  """
  bot_name = environment.get_value('BOT_NAME', 'Unknown')
  task_name = environment.get_value('TASK_NAME', 'Unknown')
  task_string = '%s task' % task_name.capitalize()
  timestamp = utils.current_date_time()

  # Blame, progression and impact re-run over time; drop stale lines from
  # previous executions so the comment log does not grow without bound.
  is_recurring_task = task_name in ['blame', 'progression', 'impact']
  if is_recurring_task and task_state == data_types.TaskState.STARTED:
    testcase.comments = re.sub(r'.*?: %s.*\n' % task_string, '',
                               testcase.comments)

  entry = '[%s] %s: %s %s' % (timestamp, bot_name, task_string, task_state)
  if message:
    entry += ': %s' % message
  testcase.comments += entry + '.\n'
  testcase.put()

  # Log the message in stackdriver after the testcase.put() call as otherwise
  # the testcase key might not available yet (i.e. for new testcase).
  if message:
    if task_state == data_types.TaskState.ERROR:
      log_func = logs.log_error
    else:
      log_func = logs.log
    log_func('%s (testcase %s, job %s).' % (message, testcase.key.id(),
                                            testcase.job_type))
# Example 2
def create_user_uploaded_testcase(key,
                                  original_key,
                                  archive_state,
                                  filename,
                                  file_path_input,
                                  timeout,
                                  job_type,
                                  queue,
                                  http_flag,
                                  gestures,
                                  additional_arguments,
                                  bug_information,
                                  crash_revision,
                                  uploader_email,
                                  platform_id,
                                  app_launch_command,
                                  fuzzer_name,
                                  fully_qualified_fuzzer_name,
                                  fuzzer_binary_name,
                                  bundled,
                                  retries,
                                  bug_summary_update_flag,
                                  additional_metadata=None):
  """Create a testcase object, metadata, and task for a user uploaded test.

  Builds a pending Testcase entity, a matching TestcaseUploadMetadata record,
  and queues an 'analyze' task for the new testcase.

  Args:
    key: Blobstore key of the testcase file (stored as fuzzed_keys).
    original_key: Blobstore key of the file as originally uploaded.
    archive_state: Archive state value stored on the testcase.
    filename: Uploaded file name (archive name for archived uploads).
    file_path_input: Path of the testcase within the archive, if any.
    timeout: Timeout value recorded in the upload metadata.
    job_type: Job with which the testcase should be analyzed.
    queue: Task queue used to schedule the analyze task.
    http_flag: Truthy if the testcase should be served over HTTP.
    gestures: Gesture list stored on the testcase.
    additional_arguments: Extra arguments supplied by the uploader, if any.
    bug_information: Existing bug id associated with the upload, if any.
    crash_revision: Revision recorded as the crash revision.
    uploader_email: Email of the uploading user.
    platform_id: Platform identifier (normalized to stripped lowercase).
    app_launch_command: Custom app launch command, if any.
    fuzzer_name: Fuzzer associated with the testcase.
    fully_qualified_fuzzer_name: Fully qualified fuzzer name; used for
      overridden_fuzzer_name when provided, else fuzzer_name is used.
    fuzzer_binary_name: Fuzz target binary name, if applicable.
    bundled: Truthy if multiple testcases were uploaded in one archive.
    retries: Retry count recorded in the upload metadata.
    bug_summary_update_flag: Truthy if the bug summary should be updated.
    additional_metadata: Optional dict of extra metadata key/value pairs.

  Returns:
    The id of the newly created testcase.
  """
  testcase = data_types.Testcase()
  # Crash details are unknown until the analyze task runs.
  testcase.crash_type = ''
  testcase.crash_state = 'Pending'
  testcase.crash_address = ''
  testcase.crash_stacktrace = ''
  testcase.fuzzed_keys = key
  testcase.minimized_keys = ''
  testcase.bug_information = ''
  testcase.regression = ''
  testcase.fixed = ''
  testcase.security_flag = False
  testcase.one_time_crasher_flag = False
  testcase.crash_revision = crash_revision
  testcase.comments = '[%s] %s: Analyze task.\n' % (utils.current_date_time(),
                                                    uploader_email)
  testcase.fuzzer_name = fuzzer_name
  testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name or fuzzer_name
  testcase.job_type = job_type
  testcase.http_flag = bool(http_flag)
  testcase.archive_state = archive_state
  testcase.status = 'Pending'
  testcase.project_name = get_project_name(job_type)

  # For archived/bundled uploads, absolute_path is the path inside the
  # archive and the archive file name is recorded separately.
  if archive_state or bundled:
    testcase.absolute_path = file_path_input
    testcase.archive_filename = filename
  else:
    testcase.absolute_path = filename
  testcase.gestures = gestures
  if bug_information:
    testcase.bug_information = bug_information
  if platform_id:
    testcase.platform_id = platform_id.strip().lower()
  if additional_arguments:
    testcase.set_metadata(
        'uploaded_additional_args', additional_arguments, update_testcase=False)
  if app_launch_command:
    testcase.set_metadata(
        'app_launch_command', app_launch_command, update_testcase=False)
  if fuzzer_binary_name:
    testcase.set_metadata(
        'fuzzer_binary_name', fuzzer_binary_name, update_testcase=False)

  if additional_metadata:
    # Plain dict iteration works identically on py2/py3; the six.iteritems
    # compat shim is unnecessary here.
    for metadata_key, metadata_value in additional_metadata.items():
      testcase.set_metadata(metadata_key, metadata_value, update_testcase=False)

  testcase.timestamp = datetime.datetime.utcnow()
  testcase.uploader_email = uploader_email
  # Persist first so the testcase gets a key id for the metadata record below.
  testcase.put()

  # Store the testcase upload metadata.
  testcase_id = testcase.key.id()
  metadata = data_types.TestcaseUploadMetadata()
  metadata.security_flag = False
  metadata.filename = filename
  metadata.status = 'Pending'
  metadata.uploader_email = uploader_email
  metadata.testcase_id = testcase_id
  metadata.blobstore_key = key
  metadata.original_blobstore_key = original_key
  metadata.timeout = timeout
  metadata.bundled = bundled
  metadata.retries = retries
  if bundled:
    metadata.path_in_archive = file_path_input
  metadata.timestamp = testcase.timestamp
  metadata.bug_summary_update_flag = bool(bug_summary_update_flag)
  metadata.put()

  # Create the job to analyze the testcase.
  tasks.add_task('analyze', testcase_id, job_type, queue)

  return testcase_id
# Example 3
def redo_testcase(testcase, tasks, user_email):
    """Redo specific tasks for a testcase.

    Validates the requested task names, clears the state left behind by each
    selected task, records the redo request in the testcase comments, drops
    stale notifications, and re-schedules the work. When minimize is
    requested, only the minimize task is queued; the other tasks follow
    automatically once it completes.
    """
    # Reject any unknown task name up front, before mutating the testcase.
    for requested_task in tasks:
        if requested_task not in VALID_REDO_TASKS:
            raise InvalidRedoTask(requested_task)

    selected = frozenset(tasks)
    minimize = 'minimize' in selected
    regression = 'regression' in selected
    progression = 'progression' in selected
    impact = 'impact' in selected
    blame = 'blame' in selected

    task_list = []
    testcase_id = testcase.key.id()

    # Always clear the flakiness marker; each selected task contributes its
    # own metadata keys below.
    metadata_keys_to_clear = ['potentially_flaky']

    if minimize:
        task_list.append('minimize')
        testcase.minimized_keys = ''
        testcase.set_metadata('redo_minimize', True, update_testcase=False)
        metadata_keys_to_clear.extend([
            'env', 'current_minimization_phase_attempts', 'minimization_phase'
        ])

        # If this testcase was archived during minimization, update the state.
        testcase.archive_state &= ~data_types.ArchiveStatus.MINIMIZED

    if regression:
        task_list.append('regression')
        testcase.regression = ''
        metadata_keys_to_clear.extend(
            ['last_regression_min', 'last_regression_max'])

    if progression:
        task_list.append('progression')
        testcase.fixed = ''
        testcase.open = True
        testcase.last_tested_crash_stacktrace = None
        testcase.triaged = False
        testcase.set_metadata(
            'progression_pending', True, update_testcase=False)
        metadata_keys_to_clear.extend([
            'last_progression_min', 'last_progression_max',
            'last_tested_revision'
        ])

    if impact:
        task_list.append('impact')
        testcase.is_impact_set_flag = False

    if blame:
        task_list.append('blame')
        testcase.set_metadata('blame_pending', True, update_testcase=False)
        testcase.set_metadata('predator_result', None, update_testcase=False)

    for stale_key in metadata_keys_to_clear:
        testcase.delete_metadata(stale_key, update_testcase=False)

    testcase.comments += '[%s] %s: Redo task(s): %s\n' % (
        utils.current_date_time(), user_email, ', '.join(sorted(task_list)))
    testcase.one_time_crasher_flag = False
    testcase.put()

    # Allow new notifications to be sent for this testcase.
    stale_notifications = ndb_utils.get_all_from_query(
        data_types.Notification.query(
            data_types.Notification.testcase_id == testcase.key.id()),
        keys_only=True)
    ndb_utils.delete_multi(stale_notifications)

    # If we are re-doing minimization, other tasks will be done automatically
    # after minimization completes. So, don't add those tasks.
    if minimize:
        tasks_to_schedule = ['minimize']
    else:
        tasks_to_schedule = task_list
    for task_name in tasks_to_schedule:
        add_task(task_name, testcase_id, testcase.job_type,
                 queue_for_testcase(testcase))