Example #1
  def _get_profile_index(self):
    """Get the first available profile directory index."""
    with self._profile_lock:
      for index, is_available in enumerate(self._available_profiles):
        if is_available:
          self._available_profiles[index] = False
          return index

    # Raise an exception rather than running in a bad state.
    raise errors.BadStateError('No profile directories available.')
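A minimal context sketch for the method above, assuming from usage alone that _available_profiles is a fixed-size list of booleans guarded by _profile_lock. The ProfilePool class and _release_profile_index helper below are hypothetical illustrations, not part of the original code.

import threading

class ProfilePool:
  """Hypothetical owner of the acquire method above."""

  def __init__(self, count):
    self._profile_lock = threading.Lock()
    self._available_profiles = [True] * count

  def _release_profile_index(self, index):
    # Return a previously acquired index to the pool so it can be reused.
    with self._profile_lock:
      self._available_profiles[index] = True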
Example #2
def _sort_build_urls_by_revision(build_urls, bucket_path, reverse):
  """Return a sorted list of build url by revision."""
  base_url = os.path.dirname(bucket_path)
  file_pattern = os.path.basename(bucket_path)
  filename_by_revision_dict = {}

  _, base_path = storage.get_bucket_name_and_path(base_url)
  base_path_with_separator = base_path + '/' if base_path else ''

  # |file_pattern| is expected to contain its own capturing group for the
  # revision, so group(1) below is the whole filename and group(2) the revision.
  match_pattern = '{base_path_with_separator}({file_pattern})'.format(
      base_path_with_separator=base_path_with_separator,
      file_pattern=file_pattern)

  for build_url in build_urls:
    match = re.match(match_pattern, build_url)
    if match:
      filename = match.group(1)
      revision = match.group(2)

      # Ensure that there are no duplicate revisions.
      if revision in filename_by_revision_dict:
        job_name = environment.get_value('JOB_NAME')
        raise errors.BadStateError(
            'Found duplicate revision %s when processing bucket. '
            'Bucket path is probably malformed for job %s.' % (revision,
                                                               job_name))

      filename_by_revision_dict[revision] = filename

  try:
    sorted_revisions = sorted(
        filename_by_revision_dict,
        reverse=reverse,
        key=lambda x: tuple(int(part) for part in x.split('.')))
  except ValueError:
    logs.log_warn(
        'Revision pattern is not an integer, falling back to string sort.')
    sorted_revisions = sorted(filename_by_revision_dict, reverse=reverse)

  sorted_build_urls = []
  for revision in sorted_revisions:
    filename = filename_by_revision_dict[revision]
    sorted_build_urls.append('%s/%s' % (base_url, filename))

  return sorted_build_urls
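The revision extraction above works because file_pattern (the basename of bucket_path) carries its own capturing group for the revision. A hedged usage sketch; the bucket layout and the assumption that storage.get_bucket_name_and_path('gs://my-builds/linux') returns ('my-builds', 'linux') are illustrative, not from the original code.

bucket_path = 'gs://my-builds/linux/app-release-([0-9.]+).zip'
build_urls = [
    'linux/app-release-1.0.2.zip',
    'linux/app-release-1.0.10.zip',
]
sorted_urls = _sort_build_urls_by_revision(build_urls, bucket_path, reverse=True)
# With the numeric sort key, revision 1.0.10 sorts above 1.0.2, so sorted_urls
# starts with 'gs://my-builds/linux/app-release-1.0.10.zip'.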
Example #3
def _fix_dictionary_line(line, dict_path):
    """Correct a single dictionary line."""
    # Ignore blank and comment lines.
    if not line or line.strip().startswith('#'):
        return line

    match = DICTIONARY_PART_PATTERN.match(line)
    # We expect this pattern to match even invalid dictionary entries. Failures
    # to match should be treated as bugs in this function.
    if not match:
        raise errors.BadStateError(
            'Failed to correct dictionary line "{line}" in {path}.'.format(
                line=line, path=dict_path))

    name_part = match.group(1) or ''
    entry = match.group(2)

    # In some cases, we'll detect the user's intended entry as a token name. This
    # can happen if the user included unquoted tokens such as "!=" or ">=".
    if not entry and name_part:
        entry = name_part
        name_part = ''

    # Handle quote entries as a special case. This simplifies later logic.
    if entry == '"':
        entry = '"\\\""'

    if entry.startswith('"') and entry.endswith('"'):
        return name_part + entry

    # In this case, we know the entry is invalid. Escape any unescaped quotes
    # within it, then append quotes to the front and back.
    new_entry = ''
    prev_character = ''
    for character in entry:
        if character == '"' and prev_character != '\\':
            new_entry += '\\'
        new_entry += character
        prev_character = character

    new_entry = '"{entry}"'.format(entry=new_entry)
    return name_part + new_entry
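DICTIONARY_PART_PATTERN is not shown above. A minimal sketch of a pattern consistent with the function's behavior (the real pattern may differ), plus a few illustrative calls:

import re

# Hypothetical: an optional unquoted 'name=' prefix as group 1, rest as group 2.
DICTIONARY_PART_PATTERN = re.compile(r'([^"=]*=\s*)?(.*)')

_fix_dictionary_line('keyword="foo"', '/tmp/test.dict')  # -> 'keyword="foo"'
_fix_dictionary_line('bar', '/tmp/test.dict')            # -> '"bar"'
_fix_dictionary_line('!=', '/tmp/test.dict')             # -> '"!="' (token-name case)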
Example #4
def process_command(task):
    """Figures out what to do with the given task and executes the command."""
    logs.log("Executing command '%s'" % task.payload())
    if not task.payload().strip():
        logs.log_error('Empty task received.')
        return

    # Parse task payload.
    task_name = task.command
    task_argument = task.argument
    job_name = task.job

    environment.set_value('TASK_NAME', task_name)
    environment.set_value('TASK_ARGUMENT', task_argument)
    environment.set_value('JOB_NAME', job_name)
    if job_name != 'none':
        job = data_types.Job.query(data_types.Job.name == job_name).get()
        # The job might have been removed. In that case, we don't want an
        # exception raised here that would cause this task to be retried by
        # another bot.
        if not job:
            logs.log_error("Job '%s' not found." % job_name)
            return

        if not job.platform:
            error_string = "No platform set for job '%s'" % job_name
            logs.log_error(error_string)
            raise errors.BadStateError(error_string)

        # Check that this bot's queue matches the queue for the job's platform.
        job_queue_suffix = tasks.queue_suffix_for_platform(job.platform)
        bot_queue_suffix = tasks.default_queue_suffix()

        if job_queue_suffix != bot_queue_suffix:
            # This happens rarely, store this as a hard exception.
            logs.log_error(
                'Wrong platform for job %s: job queue [%s], bot queue [%s].' %
                (job_name, job_queue_suffix, bot_queue_suffix))

            # Try to recreate the job in the correct task queue.
            new_queue = (tasks.high_end_queue()
                         if task.high_end else tasks.regular_queue())
            new_queue += job_queue_suffix

            # Command override is continuously run by a bot. If we keep failing
            # and recreating the task, it will just DoS the entire task queue.
            # So, we don't create any new tasks in that case since it needs
            # manual intervention to fix the override anyway.
            if not task.is_command_override:
                tasks.add_task(task_name, task_argument, job_name, new_queue)

            # Add a wait interval to avoid overflowing task creation.
            failure_wait_interval = environment.get_value('FAIL_WAIT')
            time.sleep(failure_wait_interval)
            return

        # Get testcase (if any based on task argument).
        testcase = data_handler.get_entity_by_type_and_id(
            data_types.Testcase, task_argument)
        if testcase:
            current_platform_id = environment.get_platform_id()
            testcase_platform_id = testcase.platform_id

            # This indicates we are trying to run this job on the wrong platform.
            # This can happen when different types of devices (e.g. Android)
            # share the same platform group. In this case, we just recreate the
            # task.
            if (testcase_platform_id and not utils.fields_match(
                    testcase_platform_id, current_platform_id)):
                logs.log(
                    'Testcase %d platform (%s) does not match ours (%s), exiting.'
                    % (testcase.key.id(), testcase_platform_id,
                       current_platform_id))
                tasks.add_task(task_name, task_argument, job_name)
                return

        # Some fuzzers define additional environment variables that should be
        # set for them. Apply these for testcases generated by those fuzzers and
        # for the fuzz command itself.
        fuzzer_name = None
        if task_name == 'fuzz':
            fuzzer_name = task_argument
        elif testcase:
            fuzzer_name = testcase.fuzzer_name

        # Get job's environment string.
        environment_string = job.get_environment_string()

        if task_name == 'minimize':
            # Let jobs specify a different job and fuzzer to minimize with.
            job_environment = job.get_environment()
            minimize_job_override = job_environment.get(
                'MINIMIZE_JOB_OVERRIDE')
            if minimize_job_override:
                minimize_job = data_types.Job.query(
                    data_types.Job.name == minimize_job_override).get()
                if minimize_job:
                    environment.set_value('JOB_NAME', minimize_job_override)
                    environment_string = minimize_job.get_environment_string()
                    environment_string += '\nORIGINAL_JOB_NAME = %s\n' % job_name
                    job_name = minimize_job_override
                else:
                    logs.log_error('Job for minimization not found: %s.' %
                                   minimize_job_override)
                    # Fallback to using own job for minimization.

            minimize_fuzzer_override = job_environment.get(
                'MINIMIZE_FUZZER_OVERRIDE')
            fuzzer_name = minimize_fuzzer_override or fuzzer_name

        if fuzzer_name:
            fuzzer = data_types.Fuzzer.query(
                data_types.Fuzzer.name == fuzzer_name).get()
            additional_default_variables = ''
            additional_variables_for_job = ''
            if (fuzzer and hasattr(fuzzer, 'additional_environment_string')
                    and fuzzer.additional_environment_string):
                for line in fuzzer.additional_environment_string.splitlines():
                    # Job-specific values may be defined in the fuzzer's
                    # additional environment string using the form
                    # job_name:VAR_NAME = VALUE.
                    if '=' in line and ':' in line.split('=', 1)[0]:
                        fuzzer_job_name, environment_definition = line.split(
                            ':', 1)
                        if fuzzer_job_name == job_name:
                            additional_variables_for_job += '\n%s' % environment_definition
                        continue

                    additional_default_variables += '\n%s' % line

            environment_string += additional_default_variables
            environment_string += additional_variables_for_job

        # Update environment for the job.
        update_environment_for_job(environment_string)

    # Match the CPU architecture against the one required by the job
    # definition. If they don't match, bail out and recreate the task.
    if not is_supported_cpu_arch_for_job():
        logs.log(
            'Unsupported CPU architecture specified in job definition, exiting.')
        tasks.add_task(task_name, task_argument, job_name)
        return

    # Initial cleanup.
    cleanup_task_state()

    # Start http(s) servers.
    http_server.start()

    try:
        run_command(task_name, task_argument, job_name)
    finally:
        # Final clean up.
        cleanup_task_state()
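The job-scoped environment parsing above (the job_name:VAR_NAME = VALUE form) can be exercised in isolation. A minimal standalone sketch with a hypothetical helper name, mirroring the loop in process_command:

def _split_additional_environment(environment_string, job_name):
    """Split a fuzzer's additional environment string into default lines and
    lines scoped to job_name via the 'job_name:VAR_NAME = VALUE' form."""
    defaults, job_specific = [], []
    for line in environment_string.splitlines():
        if '=' in line and ':' in line.split('=', 1)[0]:
            fuzzer_job_name, definition = line.split(':', 1)
            if fuzzer_job_name == job_name:
                job_specific.append(definition)
            continue
        defaults.append(line)
    return defaults, job_specific

raw = 'TIMEOUT = 25\nasan_job:ASAN_OPTIONS = detect_leaks=1'
_split_additional_environment(raw, 'asan_job')
# -> (['TIMEOUT = 25'], ['ASAN_OPTIONS = detect_leaks=1'])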