def _CreateUniqueJobKeyForExistingJob(job, project):
  """Creates a key from the proto job instance's attributes passed as input.

  Args:
    job: An instance of job fetched from the backend.
    project: The base name of the project.
  Returns:
    A tuple of attributes used as a key to identify this job.
  """
  return (
      job.schedule.schedule,
      job.schedule.timeZone,
      job.appEngineHttpTarget.relativeUrl,
      job.description,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.minBackoffDuration) if job.retryConfig else None,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.maxBackoffDuration) if job.retryConfig else None,
      job.retryConfig.maxDoublings if job.retryConfig else None,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          job.retryConfig.maxRetryDuration) if job.retryConfig else None,
      job.retryConfig.retryCount if job.retryConfig else None,
      parsers.ExtractTargetFromAppEngineHostUrl(
          job.appEngineHttpTarget.appEngineRouting.host, project),
  )


def _DoesAttributeNeedToBeUpdated(cur_queue_state, attribute, new_value):
  """Checks whether the attribute & value provided need to be updated.

  Note: We only check if the attribute exists in `queue.rateLimits` and
  `queue.retryConfig` since those are the only attributes we verify here. The
  only attribute we do not verify here is app-engine routing override which we
  handle separately.

  Args:
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend.
    attribute: Snake case representation of the CT API attribute name. One
      example is 'max_burst_size'.
    new_value: The value we are trying to set this attribute to.

  Returns:
    True if the attribute needs to be updated to the new value, False otherwise.
  """
  proto_attribute_name = convertors.ConvertStringToCamelCase(attribute)
  if (
      hasattr(cur_queue_state, 'rateLimits') and
      hasattr(cur_queue_state.rateLimits, proto_attribute_name)
  ):
    old_value = getattr(cur_queue_state.rateLimits, proto_attribute_name)
  elif hasattr(cur_queue_state.retryConfig, proto_attribute_name):
    old_value = getattr(cur_queue_state.retryConfig, proto_attribute_name)
  else:
    # Unable to get old attribute value.
    return True
  if old_value == new_value:
    return False
  if (
      old_value is None and
      attribute != 'max_concurrent_dispatches' and
      attribute in constants.PUSH_QUEUES_APP_DEPLOY_DEFAULT_VALUES and
      new_value == constants.PUSH_QUEUES_APP_DEPLOY_DEFAULT_VALUES[attribute]
  ):
    return False
  if attribute == 'max_dispatches_per_second' and not new_value:
    # No need to set rate if rate specified is 0. Instead, we will pause the
    # queue if it is not already paused or blocked.
    return False
  if old_value is None or new_value is None:
    return True
  old_value = convertors.CheckAndConvertStringToFloatIfApplicable(old_value)
  new_value = convertors.CheckAndConvertStringToFloatIfApplicable(new_value)
  if (
      isinstance(old_value, float) and
      isinstance(new_value, float)
  ):
    return not IsClose(old_value, new_value)
  return old_value != new_value
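

# _DoesAttributeNeedToBeUpdated relies on an IsClose helper and on
# convertors.ConvertStringToCamelCase, neither of which is shown in this
# snippet. The sketches below are illustrative assumptions about their
# behaviour, not the actual implementations.
import math


def _IsCloseSketch(old_value, new_value, rel_tol=1e-9, abs_tol=1e-7):
  """Sketch: tolerant float equality, as IsClose presumably provides."""
  return math.isclose(old_value, new_value, rel_tol=rel_tol, abs_tol=abs_tol)


def _ConvertStringToCamelCaseSketch(snake_case_name):
  """Sketch: 'max_burst_size' -> 'maxBurstSize', as used for CT API names."""
  head, *rest = snake_case_name.split('_')
  return head + ''.join(word.capitalize() for word in rest)

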
def _PostProcessMinMaxBackoff(cloud_task_args,
                              used_default_value_for_min_backoff,
                              cur_queue_state):
    """Checks min and max backoff values and updates the other value if needed.

  When uploading via queue.yaml files, if only one of the backoff values is
  specified, the other value will automatically be updated to the default
  value. If the default value does not satisfy the condition
  min_backoff <= max_backoff, then it is set equal to the other backoff value.

  Args:
    cloud_task_args: argparse.Namespace, A placeholder args namespace built to
      pass on forwards to Cloud Tasks API.
    used_default_value_for_min_backoff: A boolean value telling us if we used
      a default value for min_backoff or if it was specified explicitly in the
      YAML file.
    cur_queue_state: apis.cloudtasks.<ver>.cloudtasks_<ver>_messages.Queue,
      The Queue instance fetched from the backend if it exists, None otherwise.
  """
    if cloud_task_args.type == 'pull':
        return
    min_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cloud_task_args.min_backoff)
    max_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cloud_task_args.max_backoff)
    if min_backoff > max_backoff:
        if used_default_value_for_min_backoff:
            min_backoff = max_backoff
            cloud_task_args.min_backoff = cloud_task_args.max_backoff
            _SetSpecifiedArg(cloud_task_args, 'min_backoff',
                             cloud_task_args.max_backoff)
        else:
            max_backoff = min_backoff
            cloud_task_args.max_backoff = cloud_task_args.min_backoff
            _SetSpecifiedArg(cloud_task_args, 'max_backoff',
                             cloud_task_args.min_backoff)

    # Check if the backend values match with what we are trying to set
    if cur_queue_state and cur_queue_state.retryConfig:
        old_min_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
            cur_queue_state.retryConfig.minBackoff)
        old_max_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
            cur_queue_state.retryConfig.maxBackoff)
        if max_backoff == old_max_backoff and min_backoff == old_min_backoff:
            _DeleteSpecifiedArg(cloud_task_args, 'min_backoff')
            cloud_task_args.min_backoff = None
            _DeleteSpecifiedArg(cloud_task_args, 'max_backoff')
            cloud_task_args.max_backoff = None
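

# Both variants of _PostProcessMinMaxBackoff (the one above that consults the
# current queue state and the one below that does not) lean on _SetSpecifiedArg
# and _DeleteSpecifiedArg, which are not part of this snippet. A minimal
# sketch, assuming the dummy namespace tracks explicitly-set flags in a dict
# named `specified_args` (that attribute name is an assumption for
# illustration only):
def _SetSpecifiedArgSketch(args_namespace, arg_name, value):
  """Sketch: record that `arg_name` was explicitly set to `value`."""
  args_namespace.specified_args[arg_name] = value


def _DeleteSpecifiedArgSketch(args_namespace, arg_name):
  """Sketch: forget that `arg_name` was ever explicitly specified."""
  args_namespace.specified_args.pop(arg_name, None)

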
def _PostProcessMinMaxBackoff(cloud_task_args,
                              used_default_value_for_min_backoff):
    """Checks min and max backoff values and updates the other value if needed.

  When uploading via queue.yaml files, if only one of the backoff values is
  specified, the other value will automatically be updated to the default
  value. If the default value does not satisfy the condition
  min_backoff <= max_backoff, then it is set equal to the other backoff value.

  Args:
    cloud_task_args: argparse.Namespace, A dummy args namespace built to pass
      on forwards to Cloud Tasks API.
    used_default_value_for_min_backoff: A boolean value telling us if we used
      a default value for min_backoff or if it was specified explicitly in the
      YAML file.

  Returns:
    argparse.Namespace, The same dummy args namespace but with min_backoff and
    max_backoff values set appropriately.
  """
    min_backoff_specified = cloud_task_args.IsSpecified('min_backoff')
    max_backoff_specified = cloud_task_args.IsSpecified('max_backoff')
    min_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cloud_task_args.min_backoff)
    max_backoff = convertors.CheckAndConvertStringToFloatIfApplicable(
        cloud_task_args.max_backoff)
    if min_backoff_specified and max_backoff_specified:
        if min_backoff > max_backoff:
            if used_default_value_for_min_backoff:
                cloud_task_args.min_backoff = cloud_task_args.max_backoff
                _SetSpecifiedArg(cloud_task_args, 'min_backoff',
                                 cloud_task_args.max_backoff)
            else:
                cloud_task_args.max_backoff = cloud_task_args.min_backoff
                _SetSpecifiedArg(cloud_task_args, 'max_backoff',
                                 cloud_task_args.min_backoff)
    elif min_backoff_specified and not max_backoff_specified:
        if min_backoff > 3600:
            cloud_task_args.max_backoff = cloud_task_args.min_backoff
            _SetSpecifiedArg(cloud_task_args, 'max_backoff',
                             cloud_task_args.min_backoff)
    elif max_backoff_specified and not min_backoff_specified:
        if max_backoff < 0.1:
            cloud_task_args.min_backoff = cloud_task_args.max_backoff
            _SetSpecifiedArg(cloud_task_args, 'min_backoff',
                             cloud_task_args.max_backoff)
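

# Worked example for the variant above. The thresholds 0.1 and 3600 in the
# elif branches are the default min_backoff and max_backoff applied when only
# one of the two appears in queue.yaml (inferred from the code; the values are
# not otherwise documented here):
#
#   min_backoff: 7200 in the YAML, max_backoff omitted
#     -> 7200 > 3600, so max_backoff is raised to 7200.
#   max_backoff: 0.05 in the YAML, min_backoff omitted
#     -> 0.05 < 0.1, so min_backoff is lowered to 0.05.
#   min_backoff: 10 and max_backoff: 5 both in the YAML
#     -> used_default_value_for_min_backoff is False, so max_backoff is bumped
#        up to 10.
#   max_backoff: 0.05 in the YAML, min_backoff filled in with its default (and
#   marked as specified)
#     -> used_default_value_for_min_backoff is True, so min_backoff is lowered
#        to 0.05.
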
def ValidateCronYamlFileConfig(config):
  """Validates jobs configuration parameters in the cron YAML file.

  The purpose of this function is to mimic the behaviour of the old
  implementation of `gcloud app deploy cron.yaml` before migrating away
  from console-admin-hr. The errors generated are the same as the ones
  previously seen when gcloud sent the batch-request for updating jobs to the
  Zeus backend.

  Args:
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.

  Raises:
    HTTPError: Various different scenarios defined in the function can cause
      this exception to be raised.
  """
  cron_yaml = config.parsed
  if not cron_yaml.cron:
    return
  for job in cron_yaml.cron:

    # Retry Parameters
    if job.retry_parameters:

      # Job Retry Limit
      if (
          job.retry_parameters.job_retry_limit and
          job.retry_parameters.job_retry_limit > 5
      ):
        _RaiseHTTPException(
            'Invalid Cron retry parameters: Cannot set retry limit to more '
            'than 5 (currently set to {}).'.format(
                job.retry_parameters.job_retry_limit))

      # Job Age Limit
      if (
          job.retry_parameters.job_age_limit and
          int(convertors.CheckAndConvertStringToFloatIfApplicable(
              job.retry_parameters.job_age_limit)) <= 0
      ):
        _RaiseHTTPException(
            'Invalid Cron retry parameters: Job age limit must be greater '
            'than zero seconds.')

      # Min & Max backoff comparison
      if (
          job.retry_parameters.min_backoff_seconds is not None and
          job.retry_parameters.max_backoff_seconds is not None
      ):
        min_backoff = job.retry_parameters.min_backoff_seconds
        max_backoff = job.retry_parameters.max_backoff_seconds
        if max_backoff < min_backoff:
          _RaiseHTTPException(
              'Invalid Cron retry parameters: Min backoff sec must not be '
              'greater than max backoff sec.')
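

# For illustration only: a made-up cron.yaml entry whose commented values
# would trip the checks above (validation stops at the first failing check).
# The field names follow the standard cron.yaml schema already referenced by
# this function; the concrete values are invented.
#
#   cron:
#   - url: /tasks/cleanup
#     schedule: every 24 hours
#     retry_parameters:
#       job_retry_limit: 6        # > 5: retry limit error
#       job_age_limit: 0s         # <= 0 seconds: job age limit error
#       min_backoff_seconds: 30
#       max_backoff_seconds: 10   # max < min: backoff comparison error

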
def testCheckAndConvertStringToFloatIfApplicable(self):
    combinations_to_test = (
        ('2m', 120),
        ('1.5h', 5400),
        ('8.5s', 8.5),
        ('1d', 86400),
        ('max_retry_duration', 'max_retry_duration'),
        (17.4, 17.4),
    )
    for input_str, output in combinations_to_test:
        self.assertEqual(
            convertors.CheckAndConvertStringToFloatIfApplicable(input_str),
            output)
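

# convertors.CheckAndConvertStringToFloatIfApplicable itself is not included
# in this snippet. Based on the test table above, a minimal sketch that
# reproduces those outputs might look like the following (the real converter
# may accept more formats than this):
import re

_DURATION_UNIT_IN_SECONDS_SKETCH = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}


def _CheckAndConvertStringToFloatSketch(value):
  """Sketch: convert duration strings like '2m' or '8.5s' to seconds."""
  if not isinstance(value, str):
    # Numbers (e.g. 17.4) pass through untouched.
    return value
  match = re.fullmatch(r'(\d+(?:\.\d+)?)([smhd])', value)
  if not match:
    # Not a duration string (e.g. 'max_retry_duration'); pass it through.
    return value
  number, unit = match.groups()
  return float(number) * _DURATION_UNIT_IN_SECONDS_SKETCH[unit]

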
def _CreateUniqueJobKeyForYamlJob(job):
  """Creates a key from the YAML job instance's attributes passed as input.

  Args:
    job: An instance of a parsed YAML job object.
  Returns:
    A tuple of attributes used as a key to identify this job.
  """
  retry_params = job.retry_parameters
  return (
      job.schedule,
      job.timezone if job.timezone else 'UTC',
      job.url,
      job.description,
      retry_params.min_backoff_seconds if retry_params else None,
      retry_params.max_backoff_seconds if retry_params else None,
      retry_params.max_doublings if retry_params else None,
      convertors.CheckAndConvertStringToFloatIfApplicable(
          retry_params.job_age_limit) if retry_params else None,
      retry_params.job_retry_limit if retry_params else None,
      job.target,
  )
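

# The two key builders, _CreateUniqueJobKeyForYamlJob above and
# _CreateUniqueJobKeyForExistingJob earlier in this file, are meant to produce
# identical tuples for a YAML job and its already-deployed counterpart. Below
# is a sketch of how they might be combined to find jobs that still need to be
# (re)deployed; the function name and surrounding fetch logic are assumptions:
def _FindYamlJobsWithoutBackendMatchSketch(yaml_jobs, existing_jobs, project):
  """Sketch: return the YAML jobs whose key has no match on the backend."""
  existing_keys = set(
      _CreateUniqueJobKeyForExistingJob(job, project) for job in existing_jobs)
  return [
      job for job in yaml_jobs
      if _CreateUniqueJobKeyForYamlJob(job) not in existing_keys
  ]

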
def ValidateQueueYamlFileConfig(config):
  """Validates queue configuration parameters in the queue YAML file.

  The purpose of this function is to mimic the behaviour of the old
  implementation of `gcloud app deploy queue.yaml` before migrating away
  from console-admin-hr. The errors generated are the same as the ones
  previously seen when gcloud sent the batch-request for updating queues to the
  Zeus backend.

  Args:
    config: A yaml_parsing.ConfigYamlInfo object for the parsed YAML file we
      are going to process.

  Raises:
    HTTPError: Various different scenarios defined in the function can cause
      this exception to be raised.
  """
  queue_yaml = config.parsed
  if not queue_yaml.queue:
    return
  for queue in queue_yaml.queue:
    # Push queues
    if not queue.mode or queue.mode == constants.PUSH_QUEUE:

      # Rate
      if not queue.rate:
        _RaiseHTTPException(
            'Invalid queue configuration. Refill rate must be specified for '
            'push-based queue.')
      else:
        rate_in_seconds = convertors.ConvertRate(queue.rate)
        if rate_in_seconds > constants.MAX_RATE:
          _RaiseHTTPException(
              'Invalid queue configuration. Refill rate must not exceed '
              '{} per second (is {:.1f}).'.format(
                  constants.MAX_RATE, rate_in_seconds))

      # Retry Parameters
      if queue.retry_parameters:
        # Task Retry Limit
        _ValidateTaskRetryLimit(queue)

        # Task Age Limit
        if (
            queue.retry_parameters.task_age_limit and
            int(convertors.CheckAndConvertStringToFloatIfApplicable(
                queue.retry_parameters.task_age_limit)) <= 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Task age limit must be greater '
              'than zero.')

        # Min backoff
        if (
            queue.retry_parameters.min_backoff_seconds and
            queue.retry_parameters.min_backoff_seconds < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Min backoff seconds must not be '
              'less than zero.')

        # Max backoff
        if (
            queue.retry_parameters.max_backoff_seconds and
            queue.retry_parameters.max_backoff_seconds < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Max backoff seconds must not be '
              'less than zero.')

        # Max Doublings
        if (
            queue.retry_parameters.max_doublings and
            queue.retry_parameters.max_doublings < 0
        ):
          _RaiseHTTPException(
              'Invalid queue configuration. Max doublings must not be less '
              'than zero.')

        # Min & Max backoff comparison
        if (
            queue.retry_parameters.min_backoff_seconds is not None and
            queue.retry_parameters.max_backoff_seconds is not None
        ):
          min_backoff = queue.retry_parameters.min_backoff_seconds
          max_backoff = queue.retry_parameters.max_backoff_seconds
          if max_backoff < min_backoff:
            _RaiseHTTPException(
                'Invalid queue configuration. Min backoff sec must not be '
                'greater than max backoff sec.')

      # Bucket size
      if queue.bucket_size:
        if queue.bucket_size < 0:
          _RaiseHTTPException(
              'Error updating queue "{}": The queue rate is invalid.'.format(
                  queue.name))
        elif queue.bucket_size > constants.MAX_BUCKET_SIZE:
          _RaiseHTTPException(
              'Error updating queue "{}": Maximum bucket size is {}.'.format(
                  queue.name, constants.MAX_BUCKET_SIZE))

    # Pull Queues
    else:
      # Rate
      if queue.rate:
        _RaiseHTTPException(
            'Invalid queue configuration. Refill rate must not be specified '
            'for pull-based queue.')

      # Retry Parameters
      if queue.retry_parameters:
        # Task Retry Limit
        _ValidateTaskRetryLimit(queue)

        # Task Age Limit
        if queue.retry_parameters.task_age_limit is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify task_age_limit "
              "for a pull queue.")

        # Min backoff
        if queue.retry_parameters.min_backoff_seconds is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify min_backoff_seconds "
              "for a pull queue.")

        # Max backoff
        if queue.retry_parameters.max_backoff_seconds is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify max_backoff_seconds "
              "for a pull queue.")

        # Max doublings
        if queue.retry_parameters.max_doublings is not None:
          _RaiseHTTPException(
              "Invalid queue configuration. Can't specify max_doublings "
              "for a pull queue.")

      # Max concurrent requests
      if queue.max_concurrent_requests is not None:
        _RaiseHTTPException(
            'Invalid queue configuration. Max concurrent requests must not '
            'be specified for pull-based queue.')

      # Bucket size
      if queue.bucket_size is not None:
        _RaiseHTTPException(
            'Invalid queue configuration. Bucket size must not be specified '
            'for pull-based queue.')

      # Target
      if queue.target:
        _RaiseHTTPException(
            'Invalid queue configuration. Target must not be specified for '
            'pull-based queue.')
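

# For illustration only: made-up queue.yaml fragments exercising the push and
# pull branches above. Field names follow the standard queue.yaml schema
# already referenced by this function; the concrete values are invented.
#
#   queue:
#   - name: push-queue-ok
#     rate: 5/s                   # required for push queues
#     bucket_size: 10             # must not exceed constants.MAX_BUCKET_SIZE
#     retry_parameters:
#       min_backoff_seconds: 1
#       max_backoff_seconds: 60   # must not be smaller than min_backoff_seconds
#   - name: pull-queue-bad
#     mode: pull
#     rate: 1/s                   # rejected: rate not allowed for pull queues
#     bucket_size: 5              # rejected: bucket_size not allowed either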