def AddRotationPeriodFlag(parser):
    parser.add_argument('--rotation-period',
                        type=arg_parsers.Duration(lower_bound='1d'),
                        help='Automatic rotation period of the key.')
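
A quick usage sketch, assuming the googlecloudsdk package is importable and that Duration() converts duration strings to integer seconds:

import argparse
from googlecloudsdk.calliope import arg_parsers

parser = argparse.ArgumentParser()
AddRotationPeriodFlag(parser)
args = parser.parse_args(['--rotation-period', '30d'])
print(args.rotation_period)  # 2592000 (30 days expressed in seconds)
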
def ValidateArgFromFile(arg_internal_name, arg_value):
  """Validates an argument value parsed from a YAML arg file.

  Args:
    arg_internal_name: the internal form of the arg name.
    arg_value: the argument's value parsed from the yaml file.

  Returns:
    The validated argument value.

  Raises:
    InvalidArgException: If the arg value is missing or is not valid.
  """
  if arg_value is None:
    raise InvalidArgException(arg_internal_name, 'no argument value found.')
  if arg_internal_name in _FILE_ARG_VALIDATORS:
    return _FILE_ARG_VALIDATORS[arg_internal_name](arg_internal_name, arg_value)
  return _ValidateString(arg_internal_name, arg_value)


# Constants shared between arg-file validation and CLI flag validation.
POSITIVE_INT_PARSER = arg_parsers.BoundedInt(1, sys.maxsize)
NONNEGATIVE_INT_PARSER = arg_parsers.BoundedInt(0, sys.maxsize)
TIMEOUT_PARSER = arg_parsers.Duration(lower_bound='1m', upper_bound='6h')
ORIENTATION_LIST = ['portrait', 'landscape']
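
A brief behavior sketch for the shared parsers above (hedged; each parser is a callable that validates a raw string and, for Duration, converts it to seconds):

print(POSITIVE_INT_PARSER('42'))   # 42
print(TIMEOUT_PARSER('90m'))       # 5400 (Duration values parse to seconds)
try:
  TIMEOUT_PARSER('10h')            # above the 6h upper bound
except Exception as e:             # calliope raises an argparse-style type error
  print(e)
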


def ValidateStringList(arg_internal_name, arg_value):
  """Validates an arg whose value should be a list of strings.

  Args:
    arg_internal_name: the internal form of the arg name.
    arg_value: the argument's value parsed from yaml file.

  Returns:
    The validated argument value.

  Raises:
    InvalidArgException: the argument's value is not valid.
  """
Example #3
def AddAutoscalerArgs(parser,
                      queue_scaling_enabled=False,
                      autoscaling_file_enabled=False,
                      stackdriver_metrics_flags=False):
    """Adds commandline arguments to parser."""
    parser.add_argument(
        '--cool-down-period',
        type=arg_parsers.Duration(),
        help=(
            'The time period that the autoscaler should wait before it starts '
            'collecting information from a new instance. This prevents the '
            'autoscaler from collecting information when the instance is '
            'initializing, during which the collected usage would not be '
            'reliable. The default is 60 seconds.'))
    parser.add_argument('--description', help='Notes about Autoscaler.')
    parser.add_argument('--min-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        help='Minimum number of replicas Autoscaler will set.')
    parser.add_argument('--max-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        required=not autoscaling_file_enabled,
                        help='Maximum number of replicas Autoscaler will set.')
    parser.add_argument('--scale-based-on-cpu',
                        action='store_true',
                        help='Autoscaler will be based on CPU utilization.')
    parser.add_argument('--scale-based-on-load-balancing',
                        action='store_true',
                        help=('Use autoscaling based on load balancing '
                              'utilization.'))
    parser.add_argument(
        '--target-cpu-utilization',
        type=arg_parsers.BoundedFloat(0.0, 1.0),
        help='Autoscaler will aim to maintain CPU utilization at '
        'target level (0.0 to 1.0).')
    parser.add_argument(
        '--target-load-balancing-utilization',
        type=arg_parsers.BoundedFloat(0.0, None),
        help='Autoscaler will aim to maintain the load balancing '
        'utilization level (greater than 0.0).')
    custom_metric_utilization_help = """\
Adds a target metric value for the Autoscaler to use.

*metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

*utilization-target*::: Value of the metric Autoscaler will aim to
  maintain (greater than 0.0).

*utilization-target-type*::: How target is expressed. Valid values: {0}.
""".format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))
    if stackdriver_metrics_flags:
        custom_metric_utilization_help += """
Mutually exclusive with `--update-stackdriver-metric`.
"""
    parser.add_argument(
        '--custom-metric-utilization',
        type=arg_parsers.ArgDict(spec={
            'metric': str,
            'utilization-target': float,
            'utilization-target-type': str,
        }, ),
        action='append',
        help=custom_metric_utilization_help,
    )

    if queue_scaling_enabled:
        parser.add_argument('--queue-scaling-cloud-pub-sub',
                            type=arg_parsers.ArgDict(spec={
                                'topic': str,
                                'subscription': str,
                            }, ),
                            help="""\
        Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
        Both topic and subscription are required.

        *topic*::: Topic specification. Can be just a name or a partial URL
        (starting with "projects/..."). Topic must belong to the same project as
        Autoscaler.

        *subscription*::: Subscription specification. Can be just a name or a
        partial URL (starting with "projects/..."). Subscription must belong to
        the same project as Autoscaler and must be connected to the specified
        topic.
        """)
        parser.add_argument(
            '--queue-scaling-acceptable-backlog-per-instance',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Queue-based scaling target: autoscaler will aim '
            'to ensure that the average number of tasks in the queue '
            'is no greater than this value.',
        )
        parser.add_argument(
            '--queue-scaling-single-worker-throughput',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Hint for queue-based scaling: how much throughput '
            'a single worker instance is able to consume.')
    if autoscaling_file_enabled:
        parser.add_argument(
            '--autoscaling-file',
            metavar='PATH',
            help=(
                'Path of the file from which autoscaling configuration will be '
                'loaded. This flag allows you to atomically setup complex '
                'autoscalers.'))
    if stackdriver_metrics_flags:
        parser.add_argument(
            '--remove-stackdriver-metric',
            metavar='METRIC',
            help=(
                'Stackdriver metric to remove from autoscaling configuration. '
                'If the metric is the only input used for autoscaling the '
                'command will fail.'))
        parser.add_argument(
            '--update-stackdriver-metric',
            metavar='METRIC',
            help=
            ('Stackdriver metric to use as an input for autoscaling. '
             'When using this flag you must also specify target value of the '
             'metric by specifying '
             '`--stackdriver-metric-single-instance-assignment` or '
             '`--stackdriver-metric-utilization-target` and '
             '`--stackdriver-metric-utilization-target-type`. '
             'Mutually exclusive with `--custom-metric-utilization`.'))
        parser.add_argument(
            '--stackdriver-metric-filter',
            metavar='FILTER',
            help=('Expression for filtering samples used to autoscale, see '
                  'https://cloud.google.com/monitoring/api/v3/filters.'))
        parser.add_argument(
            '--stackdriver-metric-utilization-target',
            metavar='TARGET',
            type=float,
            help=('Value of the metric Autoscaler will aim to maintain. When '
                  'specifying this flag you must also provide '
                  '`--stackdriver-metric-utilization-target-type`. Mutually '
                  'exclusive with '
                  '`--stackdriver-metric-single-instance-assignment` and '
                  '`--custom-metric-utilization`.'))

        parser.add_argument(
            '--stackdriver-metric-utilization-target-type',
            metavar='TARGET_TYPE',
            choices=_ALLOWED_UTILIZATION_TARGET_TYPES_LOWER,
            help=('How the target value of the metric is expressed. When '
                  'specifying this flag you must also provide '
                  '`--stackdriver-metric-utilization-target`. Mutually '
                  'exclusive with '
                  '`--stackdriver-metric-single-instance-assignment` and '
                  '`--custom-metric-utilization`.'))
        parser.add_argument(
            '--stackdriver-metric-single-instance-assignment',
            metavar='ASSIGNMENT',
            type=float,
            help=('Autoscaler will aim to maintain the value of the metric '
                  'divided by the number of instances at this level. Mutually '
                  'exclusive with '
                  '`--stackdriver-metric-utilization-target`, '
                  '`--stackdriver-metric-utilization-target-type`, and '
                  '`--custom-metric-utilization`.'))
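
A hedged usage sketch, assuming the module-level constants this function references (such as _ALLOWED_UTILIZATION_TARGET_TYPES) are defined: the flags register on a plain argparse parser, and the ArgDict type turns each --custom-metric-utilization value into a dict keyed by the spec.

import argparse
from googlecloudsdk.calliope import arg_parsers

parser = argparse.ArgumentParser()
AddAutoscalerArgs(parser)
args = parser.parse_args([
    '--max-num-replicas', '10',
    '--target-cpu-utilization', '0.6',
    '--custom-metric-utilization',
    'metric=custom.googleapis.com/example,utilization-target=0.7',
])
print(args.max_num_replicas)           # 10
print(args.custom_metric_utilization)  # [{'metric': ..., 'utilization-target': 0.7}]
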
Example #4
def AddUpdateArgs(parser, include_alpha_logging,
                  include_l7_internal_load_balancing, include_stack_type,
                  include_ipv6_access_type, api_version):
    """Add args to the parser for subnet update.

  Args:
    parser: The argparse parser.
    include_alpha_logging: Include alpha-specific logging args.
    include_l7_internal_load_balancing: Include Internal HTTP(S) LB args.
    include_stack_type: Include stack type args.
    include_ipv6_access_type: Include IPv6 access type args.
    api_version: The api version of the request.
  """
    messages = apis.GetMessagesModule('compute',
                                      compute_api.COMPUTE_GA_API_VERSION)

    updated_field = parser.add_mutually_exclusive_group()

    updated_field.add_argument(
        '--enable-private-ip-google-access',
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public IP address.'))

    updated_field.add_argument('--add-secondary-ranges',
                               type=arg_parsers.ArgDict(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Adds secondary IP ranges to the subnetwork for use in IP aliasing.

      For example, `--add-secondary-ranges range1=192.168.64.0/24` adds
      a secondary range 192.168.64.0/24 with name range1.

      * `RANGE_NAME` - Name of the secondary range.
      * `RANGE` - IP range in CIDR format.
      """)

    updated_field.add_argument('--remove-secondary-ranges',
                               type=arg_parsers.ArgList(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Removes secondary ranges from the subnetwork.

      For example, `--remove-secondary-ranges range2,range3` removes the
      secondary ranges with names range2 and range3.
      """)

    updated_field.add_argument(
        '--enable-flow-logs',
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable VPC Flow Logs for this subnet. If the subnet '
            'does not support VPC Flow Logs, this flag has no effect. For '
            'more information, see '
            'https://cloud.google.com/vpc/docs/using-flow-logs.'))

    AddLoggingAggregationInterval(parser, messages)
    parser.add_argument('--logging-flow-sampling',
                        type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                      upper_bound=1.0),
                        help="""\
      Can only be specified if VPC Flow Logs for this subnetwork is
      enabled. The value of the field must be in [0, 1]. Set the sampling rate
      of VPC Flow Logs within the subnetwork where 1.0 means all collected
      logs are reported and 0.0 means no logs are reported. Default is 0.5,
      which means half of all collected logs are reported.
      """)
    AddLoggingMetadata(parser, messages)

    parser.add_argument('--logging-filter-expr',
                        help="""\
      Can only be specified if VPC Flow Logs for this subnetwork is enabled.
      Export filter used to define which logs should be generated.
      """)
    parser.add_argument('--logging-metadata-fields',
                        type=arg_parsers.ArgList(),
                        metavar='METADATA_FIELD',
                        default=None,
                        help="""\
      Can only be specified if VPC Flow Logs for this subnetwork is enabled
      and "metadata" is set to CUSTOM_METADATA. The comma-separated list of
      metadata fields that should be added to reported logs.
      """)

    if include_alpha_logging:
        messages = apis.GetMessagesModule(
            'compute', compute_api.COMPUTE_ALPHA_API_VERSION)
        AddLoggingAggregationIntervalDeprecated(parser, messages)
        parser.add_argument('--flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC Flow Logs for this subnetwork is enabled.
        The value of the field must be in [0, 1]. Set the sampling rate of
        VPC Flow Logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)
        AddLoggingMetadataDeprecated(parser, messages)

    if include_l7_internal_load_balancing:
        updated_field.add_argument(
            '--role',
            choices={'ACTIVE': 'The ACTIVE subnet that is currently used.'},
            type=lambda x: x.replace('-', '_').upper(),
            help=(
                'The role is set to ACTIVE to update a BACKUP reserved '
                'address range to\nbe the new ACTIVE address range. Note '
                'that the only supported value for\nthis flag is ACTIVE since '
                'setting an address range to BACKUP is not\nsupported. '
                '\n\nThis field is only valid when updating a reserved IP '
                'address range used\nfor the purpose of Internal HTTP(S) Load '
                'Balancer.'))
        parser.add_argument('--drain-timeout',
                            type=arg_parsers.Duration(lower_bound='0s'),
                            default='0s',
                            help="""\
        The time period for draining traffic from Internal HTTP(S) Load Balancer
        proxies that are assigned addresses in the current ACTIVE subnetwork.
        For example, ``1h'', ``60m'' and ``3600s'' each specify a duration of
        1 hour for draining the traffic. Longer times reduce the number of
        proxies that are draining traffic at any one time, and so improve
        the availability of proxies for load balancing. The drain timeout is
        only applicable when the [--role=ACTIVE] flag is being used.
        """)

    if include_stack_type:
        parser.add_argument(
            '--stack-type',
            choices={
                'IPV4_ONLY':
                'New VMs in this subnet will only be assigned IPv4 addresses',
                'IPV4_IPV6':
                'New VMs in this subnet can have both IPv4 and IPv6 addresses'
            },
            type=arg_utils.ChoiceToEnumName,
            help=(
                'The stack type for this subnet to identify whether the IPv6 '
                'feature is enabled or not.'))

    if include_ipv6_access_type:
        parser.add_argument(
            '--ipv6-access-type',
            choices={
                'INTERNAL': 'VMs in this subnet can have internal IPv6.',
                'EXTERNAL': 'VMs in this subnet can have external IPv6.'
            },
            type=arg_utils.ChoiceToEnumName,
            help=(
                'The access type of IPv6 address this subnet holds. It\'s '
                'immutable and can only be specified during creation or when '
                'the subnet is updated into IPV4_IPV6 dual stack. If the '
                'IPv6 access type is EXTERNAL, then this subnet cannot '
                'enable direct path.'))

    messages = apis.GetMessagesModule('compute', api_version)
    GetPrivateIpv6GoogleAccessTypeFlagMapper(messages).choice_arg.AddToParser(
        updated_field)
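
A small behavior sketch for the list-valued flag above (hedged): ArgList parses a comma-separated string into a Python list.

from googlecloudsdk.calliope import arg_parsers

parse_ranges = arg_parsers.ArgList(min_length=1)
print(parse_ranges('range2,range3'))  # ['range2', 'range3']
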
Example #5
    def Args(parser):
        host = parser.add_argument(
            '--host',
            help='The value of the host header used by the HTTPS health check.'
        )
        host.detailed_help = """\
        The value of the host header used in this HTTPS health check request.
        By default, this is empty and Google Compute Engine automatically sets
        the host header in health requests to the same external IP address as
        the forwarding rule associated with the target pool.
        """

        port = parser.add_argument(
            '--port',
            help='The TCP port number for the health request. Default is 443.',
            type=int,
            default=443)
        port.detailed_help = """\
        The TCP port number that this health check monitors. The default value
        is 443.
        """

        request_path = parser.add_argument(
            '--request-path',
            help="The request path for the health check. Default is ``/''.",
            default='/')
        request_path.detailed_help = """\
        The request path that this health check monitors. For example,
        ``/healthcheck''. The default value is ``/''.
        """

        check_interval_sec = parser.add_argument(
            '--check-interval',
            help='How often to run the check. Default is 5s.',
            type=arg_parsers.Duration(),
            default='5s')
        check_interval_sec.detailed_help = """\
        How often to perform a health check for an instance. For example,
        specifying ``10s'' will run the check every 10 seconds. Valid units
        for this flag are ``s'' for seconds and ``m'' for minutes.
        The default value is ``5s''.
        """

        timeout_sec = parser.add_argument(
            '--timeout',
            help='How long to wait until check is a failure. Default is 5s.',
            type=arg_parsers.Duration(),
            default='5s')
        timeout_sec.detailed_help = """\
        If Google Compute Engine doesn't receive an HTTPS 200 response from the
        instance by the time specified by the value of this flag, the health
        check request is considered a failure. For example, specifying ``10s''
        will cause the check to wait for 10 seconds before considering the
        request a failure.  Valid units for this flag are ``s'' for seconds and
        ``m'' for minutes.  The default value is ``5s''.
        """

        unhealthy_threshold = parser.add_argument(
            '--unhealthy-threshold',
            help=
            'Consecutive failures to mark instance unhealthy. Default is 2.',
            type=int,
            default=2)
        unhealthy_threshold.detailed_help = """\
        The number of consecutive health check failures before a healthy
        instance is marked as unhealthy. The default is 2.
        """

        healthy_threshold = parser.add_argument(
            '--healthy-threshold',
            help=
            'Consecutive successes to mark instance healthy. Default is 2.',
            type=int,
            default=2)
        healthy_threshold.detailed_help = """\
        The number of consecutive successful health checks before an
        unhealthy instance is marked as healthy. The default is 2.
        """

        parser.add_argument(
            '--description',
            help='An optional, textual description for the HTTPS health check.'
        )

        parser.add_argument('name', help='The name of the HTTPS health check.')
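
One detail worth noting (a hedged aside, not from the snippet itself): argparse applies `type` to string defaults, so `default='5s'` above is run through Duration() when the flag is omitted.

import argparse
from googlecloudsdk.calliope import arg_parsers

p = argparse.ArgumentParser()
p.add_argument('--check-interval', type=arg_parsers.Duration(), default='5s')
print(p.parse_args([]).check_interval)                           # 5
print(p.parse_args(['--check-interval', '10m']).check_interval)  # 600
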
Example #6
  def Args(cls, parser):
    # Required args. The audience is a positional arg, meaning it is required.
    parser.add_argument(
        'audience', help='The workforce pool provider resource name.')

    # The credential source must be specified (file-sourced or URL-sourced).
    credential_types = parser.add_group(
        mutex=True, required=True, help='Credential types.')
    credential_types.add_argument(
        '--credential-source-file',
        help='The location of the file which stores the credential.')
    credential_types.add_argument(
        '--credential-source-url',
        help='The URL to obtain the credential from.')
    if cls._use_pluggable_auth:
      credential_types.add_argument(
          '--executable-command',
          hidden=True,
          help='The full command to run to retrieve the credential. Must be an absolute path for the program.'
      )

    parser.add_argument(
        '--workforce-pool-user-project',
        help='The client project number used to identify the application ' +
        '(client project) to the server when calling Google APIs. The user ' +
        'principal must have serviceusage.services.use IAM permission to use ' +
        'the specified project.',
        required=True)
    parser.add_argument(
        '--output-file',
        help='Location to store the generated credential configuration file.',
        required=True)

    # Optional args.
    parser.add_argument(
        '--service-account',
        help='The email of the service account to impersonate.')
    parser.add_argument(
        '--subject-token-type',
        help='The type of token being used for authorization. ' +
        'This defaults to urn:ietf:params:oauth:token-type:id_token.')
    parser.add_argument(
        '--credential-source-headers',
        type=arg_parsers.ArgDict(),
        metavar='key=value',
        help='Headers to use when querying the credential-source-url.')
    parser.add_argument(
        '--credential-source-type',
        help='The format of the credential source (JSON or text).')
    parser.add_argument(
        '--credential-source-field-name',
        help='The subject token field name (key) in a JSON credential source.')

    if cls._use_pluggable_auth:
      executable_args = parser.add_group(
          hidden=True,
          help='Arguments for an executable type credential source.')

      executable_args.add_argument(
          '--executable-timeout-millis',
          hidden=True,
          type=arg_parsers.Duration(
              default_unit='ms',
              lower_bound='5s',
              upper_bound='120s',
              parsed_unit='ms'),
          help='The timeout duration in milliseconds for waiting for the executable to finish.'
      )
      executable_args.add_argument(
          '--executable-output-file',
          hidden=True,
          help='The absolute path to the file storing the executable response.')
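
A hedged sketch of the millisecond-based Duration parser used for --executable-timeout-millis: with default_unit='ms' a bare number is read as milliseconds, and parsed_unit='ms' makes the parser return milliseconds rather than seconds.

from googlecloudsdk.calliope import arg_parsers

parse_ms = arg_parsers.Duration(
    default_unit='ms', lower_bound='5s', upper_bound='120s', parsed_unit='ms')
print(parse_ms('30000'))  # 30000 (bare numbers are milliseconds here)
print(parse_ms('30s'))    # 30000
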
Example #7
def ParseWithDefault(value):
    if value == DEFAULT_MESSAGE_RETENTION_VALUE:
        return DEFAULT_MESSAGE_RETENTION_VALUE
    return str(arg_parsers.Duration()(value)) + 's'
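
A behavior sketch for ParseWithDefault, assuming DEFAULT_MESSAGE_RETENTION_VALUE is the sentinel string its callers pass through (e.g. 'default'):

DEFAULT_MESSAGE_RETENTION_VALUE = 'default'  # assumed sentinel for this sketch
print(ParseWithDefault('default'))  # 'default' passes through unchanged
print(ParseWithDefault('10m'))      # '600s' - other values normalize to seconds
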
Example #8
def AddFlexibleCacheStepOne(parser, resource_name, update_command=False):
  """Adds cache mode, max ttl, default ttl, client ttl and custom response header args to the argparse."""
  # TODO (b/165456063) document enums as lowercase-with-dash. Accept both forms.
  parser.add_argument(
      '--cache-mode',
      choices={
          'CACHE_ALL_STATIC':
              """Automatically cache static content, including common image
              formats, media (video and audio), web assets (JavaScript and CSS).
              Requests and responses that are marked as uncacheable, as well as
              dynamic content (including HTML), aren't cached.""",
          'USE_ORIGIN_HEADERS':
              """Require the origin to set valid caching headers to cache
              content. Responses without these headers aren't cached at
              Google's edge, and require a full trip to the origin on every
              request, potentially impacting performance and increasing load on
              the origin server.""",
          'FORCE_CACHE_ALL':
              """Cache all content, ignoring any "private", "no-store" or
              "no-cache" directives in Cache-Control response headers. Warning:
              this may result in Cloud CDN caching private, per-user (user
              identifiable) content. You should only enable this on backends
              that are not serving private or dynamic content, such as storage
              buckets."""
      },
      type=lambda x: x.replace('-', '_').upper(),
      default=None,
      help="""\
      Specifies the cache setting for all responses from this backend.
      """)
  client_ttl_help = """\
  Specifies a separate client (for example, browser client) TTL, separate from the TTL
  for Cloud CDN's edge caches.

  This allows you to set a shorter TTL for browsers/clients, and to have those
  clients revalidate content against Cloud CDN on a more regular basis, without
  requiring revalidation at the origin.

  The value of clientTtl cannot be set to a value greater than that of maxTtl,
  but can be equal.

  Any cacheable response has its max-age/s-maxage directives adjusted down to
  the client TTL value if necessary; an Expires header will be replaced with a
  suitable max-age directive.

  The maximum allowed value is 86400s (1 day).

  When creating a new backend with CACHE_ALL_STATIC and the field is unset, or
  when switching to that mode and the field is unset, a default value of 3600
  is used.

  When the cache mode is set to "USE_ORIGIN_HEADERS", you must omit this field.
  """
  client_ttl_group = parser.add_mutually_exclusive_group()
  client_ttl_group.add_argument(
      '--client-ttl',
      type=arg_parsers.Duration(upper_bound=86400),
      default=None,
      help=client_ttl_help,
  )
  if update_command:
    client_ttl_group.add_argument(
        '--no-client-ttl', action='store_true', help='Clears client TTL value.')
  default_ttl_help = """\
  Specifies the default TTL for cached content served by this origin for
  responses that do not have an existing valid TTL (max-age or s-maxage).

  The default value is 3600s for cache modes that allow a default TTL to be
  defined.

  The value of defaultTtl cannot be set to a value greater than that of maxTtl,
  but can be equal.

  When the cacheMode is set to FORCE_CACHE_ALL, the defaultTtl overwrites
  the TTL set in all responses.

  A TTL of "0" means Always revalidate.

  The maximum allowed value is 31,622,400s (1 year). Infrequently
  accessed objects may be evicted from the cache before the defined TTL.

  When creating a new backend with CACHE_ALL_STATIC or FORCE_CACHE_ALL and the
  field is unset, or when updating an existing backend to use these modes and
  the field is unset, a default value of 3600 is used. When the cache mode is
  set to "USE_ORIGIN_HEADERS", you must omit this field.
  """
  default_ttl_group = parser.add_mutually_exclusive_group()
  default_ttl_group.add_argument(
      '--default-ttl',
      type=arg_parsers.Duration(upper_bound=31622400),
      default=None,
      help=default_ttl_help,
  )
  if update_command:
    default_ttl_group.add_argument(
        '--no-default-ttl',
        action='store_true',
        help='Clears default TTL value.')
  max_ttl_help = """\
  Specifies the maximum allowed TTL for cached content served by this origin.

  The default value is 86400 for cache modes that support a max TTL.

  Cache directives that attempt to set a max-age or s-maxage higher than this,
  or an Expires header more than maxTtl seconds in the future, are capped at
  the value of maxTtl, as if it were the value of an s-maxage Cache-Control
  directive.

  A TTL of "0" means Always revalidate.

  The maximum allowed value is 31,622,400s (1 year). Infrequently
  accessed objects may be evicted from the cache before the defined TTL.

  When creating a new backend with CACHE_ALL_STATIC and the field is unset, or
  when updating an existing backend to use these modes and the field is unset,
  a default value of 86400 is used. When the cache mode is set to
  "USE_ORIGIN_HEADERS" or "FORCE_CACHE_ALL", you must omit this field.
  """
  max_ttl_group = parser.add_mutually_exclusive_group()
  max_ttl_group.add_argument(
      '--max-ttl',
      type=arg_parsers.Duration(upper_bound=31622400),
      default=None,
      help=max_ttl_help,
  )
  if update_command:
    max_ttl_group.add_argument(
        '--no-max-ttl', action='store_true', help='Clears max TTL value.')
  custom_response_header_help = """\
  Custom headers that the external HTTP(S) load balancer adds to proxied responses.
  For the list of headers, see [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).

  Variables are not case-sensitive.
  """
  custom_response_header_group = parser.add_mutually_exclusive_group()
  custom_response_header_group.add_argument(
      '--custom-response-header',
      action='append',
      help=custom_response_header_help)
  if update_command:
    custom_response_header_group.add_argument(
        '--no-custom-response-headers',
        action='store_true',
        help='Remove all custom response headers for the %s.' % resource_name)
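
A hedged usage sketch for update commands, assuming the module imports are in place: each TTL flag and its --no-* companion share a mutually exclusive group, so setting and clearing the same TTL in one invocation is rejected.

import argparse

p = argparse.ArgumentParser()
AddFlexibleCacheStepOne(p, 'backend service', update_command=True)
print(p.parse_args(['--client-ttl', '1h']).client_ttl)  # 3600
# p.parse_args(['--client-ttl', '1h', '--no-client-ttl'])  # argparse error
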
Example #9
def AddFlexibleCacheStepTwo(parser, resource_name, update_command=False):
  """Adds serve-while-stale and bypass-cache-on-request-headers args to the argparse."""
  serve_while_stale_help = """\
  Serve existing content from the cache (if available) when revalidating
  content with the origin; this allows content to be served more quickly, and
  also allows content to continue to be served if the backend is down or
  reporting errors.

  This setting defines the default serve-stale duration for any cached responses
  that do not specify a stale-while-revalidate directive. Stale responses that
  exceed the TTL configured here will not be served without first being
  revalidated with the origin. The default limit is 86400s (1 day), which will
  allow stale content to be served up to this limit beyond the max-age
  (or s-max-age) of a cached response.

  The maximum allowed value is 604800s (1 week).

  Set this to zero (0) to disable serve-while-stale.
  """
  serve_while_stale_group = parser.add_mutually_exclusive_group()
  serve_while_stale_group.add_argument(
      '--serve-while-stale',
      type=arg_parsers.Duration(upper_bound=604800),
      default=None,
      help=serve_while_stale_help,
  )
  if update_command:
    serve_while_stale_group.add_argument(
        '--no-serve-while-stale',
        action='store_true',
        help='Clears serve while stale value.')
  bypass_cache_on_request_headers_help = """\
  Bypass the cache when the specified request headers are matched - e.g.
  Pragma or Authorization headers. Up to 5 headers can be specified.

  The cache is bypassed for all cdnPolicy.cacheMode settings.

  Note that requests that include these headers will always fill from origin,
  and may result in a large number of cache misses if the specified headers are
  common to many requests.

  Values are case-insensitive.

  The header name must be a valid HTTP header field token (per RFC 7230).

  For the list of restricted headers, see the list of required header name
  properties in [How custom headers work](https://cloud.google.com/load-balancing/docs/custom-headers#how_custom_headers_work).

  A header name must not appear more than once in the list of added headers.
  """
  bypass_cache_on_request_headers_group = parser.add_mutually_exclusive_group()
  bypass_cache_on_request_headers_group.add_argument(
      '--bypass-cache-on-request-headers',
      action='append',
      help=bypass_cache_on_request_headers_help)
  if update_command:
    bypass_cache_on_request_headers_group.add_argument(
        '--no-bypass-cache-on-request-headers',
        action='store_true',
        help='Remove all bypass cache on request headers for the %s.' %
        resource_name)
Example #10
    def Args(cls, parser):
        parser.add_argument(
            'audience',
            help='The workload identity pool provider resource name.')

        credential_types = parser.add_group(mutex=True,
                                            required=True,
                                            help='Credential types.')
        credential_types.add_argument(
            '--credential-source-file',
            help='Location of the credential source file.')
        credential_types.add_argument(
            '--credential-source-url',
            help='URL to obtain the credential from.')
        if cls._support_pluggable_auth:
            credential_types.add_argument(
                '--executable-command',
                hidden=True,
                help=
                'The full command to run to retrieve the credential. Must be an absolute path for the program.'
            )
        credential_types.add_argument('--aws',
                                      help='Use AWS.',
                                      action='store_true')
        credential_types.add_argument('--azure',
                                      help='Use Azure.',
                                      action='store_true')

        parser.add_argument(
            '--service-account',
            help='The email of the service account to impersonate.')
        parser.add_argument(
            '--credential-source-headers',
            type=arg_parsers.ArgDict(),
            metavar='key=value',
            help='Headers to use when querying the credential-source-url.')
        parser.add_argument(
            '--credential-source-type',
            help='The format of the credential source (JSON or text).')
        parser.add_argument(
            '--credential-source-field-name',
            help=
            'The subject token field name (key) in a JSON credential source.')
        parser.add_argument(
            '--app-id-uri',
            help='The custom Application ID URI for the Azure access token.')
        parser.add_argument(
            '--output-file',
            help=
            'Location to store the generated credential configuration file.',
            required=True)
        parser.add_argument(
            '--subject-token-type',
            help='The type of token being used for authorization.')
        parser.add_argument(
            '--enable-imdsv2',
            help=
            'Adds the AWS IMDSv2 session token Url to the credential source to enforce the AWS IMDSv2 flow.',
            action='store_true')

        if cls._support_pluggable_auth:
            executable_args = parser.add_group(
                hidden=True,
                help='Arguments for an executable type credential source.')

            executable_args.add_argument(
                '--executable-timeout-millis',
                hidden=True,
                type=arg_parsers.Duration(default_unit='ms',
                                          lower_bound='5s',
                                          upper_bound='120s',
                                          parsed_unit='ms'),
                help=
                'The timeout duration in milliseconds for waiting for the executable to finish.'
            )
            executable_args.add_argument(
                '--executable-output-file',
                hidden=True,
                help=
                'The absolute path to the file storing the executable response.'
            )
Example #11
def BetaArgsForClusterRef(parser):
    """Register beta-only flags for creating a Dataproc cluster."""
    flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)

    autoscaling_group = parser.add_argument_group()
    flags.AddAutoscalingPolicyResourceArgForCluster(autoscaling_group,
                                                    api_version='v1beta2')

    AddKerberosGroup(parser)

    parser.add_argument('--enable-component-gateway',
                        hidden=True,
                        action='store_true',
                        help="""\
        Enable access to the web UIs of selected components on the cluster
        through the component gateway.
        """)

    parser.add_argument('--max-idle',
                        type=arg_parsers.Duration(),
                        help="""\
        The duration before cluster is auto-deleted after last job completes,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

    auto_delete_group = parser.add_mutually_exclusive_group()
    auto_delete_group.add_argument('--max-age',
                                   type=arg_parsers.Duration(),
                                   help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

    auto_delete_group.add_argument('--expiration-time',
                                   type=arg_parsers.Datetime.Parse,
                                   help="""\
        The time when cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
        information on time formats.
        """)

    for instance_type in ('master', 'worker'):
        help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
        if instance_type == 'worker':
            help_msg += """
      Note:
      No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
        help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
        parser.add_argument('--{0}-accelerator'.format(instance_type),
                            type=arg_parsers.ArgDict(spec={
                                'type': str,
                                'count': int,
                            }),
                            metavar='type=TYPE,[count=COUNT]',
                            help=help_msg)

    AddReservationAffinityGroup(parser)
Example #12
def AddSubscriptionSettingsFlags(parser,
                                 is_update=False,
                                 support_message_ordering=False,
                                 support_filtering=False):
    """Adds the flags for creating or updating a subscription.

  Args:
    parser: The argparse parser.
    is_update: Whether or not this is for the update operation (vs. create).
    support_message_ordering: Whether or not flags for ordering should be added.
    support_filtering: Whether or not flags for filtering should be added.
  """
    AddAckDeadlineFlag(parser)
    AddPushConfigFlags(parser)
    AddMessageRetentionFlags(parser, is_update)
    if support_message_ordering and not is_update:
        parser.add_argument(
            '--enable-message-ordering',
            action='store_true',
            default=None,
            help=
            """Whether or not to receive messages with the same ordering key in
            order. If true, messages with the same ordering key will be sent to
            subscribers in the order in which they were received by Cloud
            Pub/Sub.""")
    if support_filtering and not is_update:
        parser.add_argument(
            '--message-filter',
            type=str,
            help=
            """Expression to filter messages. If set, Pub/Sub only delivers the
        messages that match the filter. The expression must be a non-empty
        string in the Pub/Sub filtering language.""")
    current_group = parser
    if is_update:
        mutual_exclusive_group = current_group.add_mutually_exclusive_group()
        mutual_exclusive_group.add_argument(
            '--clear-dead-letter-policy',
            action='store_true',
            default=None,
            help=
            """If set, clear the dead letter policy from the subscription.""")
        current_group = mutual_exclusive_group

    set_dead_letter_policy_group = current_group.add_argument_group(
        help="""Dead Letter Queue Options. The Cloud Pub/Sub service account
           associated with the enclosing subscription's parent project (i.e.,
           service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
           must have permission to Publish() to this topic and Acknowledge()
           messages on this subscription.""")
    dead_letter_topic = resource_args.CreateTopicResourceArg(
        'to publish dead letter messages to.',
        flag_name='dead-letter-topic',
        positional=False,
        required=False)
    resource_args.AddResourceArgs(set_dead_letter_policy_group,
                                  [dead_letter_topic])
    set_dead_letter_policy_group.add_argument(
        '--max-delivery-attempts',
        type=arg_parsers.BoundedInt(5, 100),
        default=None,
        help="""Maximum number of delivery attempts for any message. The value
          must be between 5 and 100. Defaults to 5. `--dead-letter-topic`
          must also be specified.""")
    parser.add_argument(
        '--expiration-period',
        type=ParseExpirationPeriodWithNeverSentinel,
        help="""The subscription will expire if it is inactive for the given
          period. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed. This flag additionally accepts the special value "never" to
          indicate that the subscription will never expire.""")

    current_group = parser
    if is_update:
        mutual_exclusive_group = current_group.add_mutually_exclusive_group()
        mutual_exclusive_group.add_argument(
            '--clear-retry-policy',
            action='store_true',
            default=None,
            help="""If set, clear the retry policy from the subscription.""")
        current_group = mutual_exclusive_group

    set_retry_policy_group = current_group.add_argument_group(
        help="""Retry Policy Options. Retry policy specifies how Cloud Pub/Sub
              retries message delivery for this subscription.""")

    set_retry_policy_group.add_argument(
        '--min-retry-delay',
        type=arg_parsers.Duration(lower_bound='0s', upper_bound='600s'),
        help="""The minimum delay between consecutive deliveries of a given
          message. Value should be between 0 and 600 seconds. Defaults to 10
          seconds. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed.""")
    set_retry_policy_group.add_argument(
        '--max-retry-delay',
        type=arg_parsers.Duration(lower_bound='0s', upper_bound='600s'),
        help="""The maximum delay between consecutive deliveries of a given
          message. Value should be between 0 and 600 seconds. Defaults to 10
          seconds. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed.""")
Example #13
def ArgsForClusterRef(parser,
                      beta=False,
                      include_deprecated=True,
                      include_ttl_config=False,
                      include_gke_platform_args=False):
  """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
    include_ttl_config: whether to include Scheduled Delete(TTL) args
    include_gke_platform_args: whether to include GKE-based cluster args
  """
  labels_util.AddCreateLabelsFlags(parser)
  # 30m is backend timeout + 5m for safety buffer.
  flags.AddTimeoutFlag(parser, default='35m')
  flags.AddZoneFlag(parser, short_flags=include_deprecated)
  flags.AddComponentFlag(parser)

  platform_group = parser.add_argument_group(mutex=True)
  gce_platform_group = platform_group.add_argument_group(help="""\
    Compute Engine options for Dataproc clusters.
    """)

  instances_flags.AddTagsArgs(gce_platform_group)
  gce_platform_group.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      default=None,
      help=('Metadata to be made available to the guest operating system '
            'running on the instances'),
      metavar='KEY=VALUE')

  # Either allow creating a single node cluster (--single-node), or specifying
  # the number of workers in the multi-node cluster (--num-workers and
  # --num-secondary-workers)
  node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
  node_group.add_argument(
      '--single-node',
      action='store_true',
      help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
  # Not mutually exclusive
  worker_group = node_group.add_argument_group(help='Multi-node cluster flags')
  worker_group.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
      'server-specified.')
  worker_group.add_argument(
      '--secondary-worker-type',
      metavar='TYPE',
      choices=['preemptible', 'non-preemptible'],
      default='preemptible',
      help='The type of the secondary worker group.')
  num_secondary_workers = worker_group.add_argument_group(mutex=True)
  num_secondary_workers.add_argument(
      '--num-preemptible-workers',
      action=actions.DeprecationAction(
          '--num-preemptible-workers',
          warn=('The `--num-preemptible-workers` flag is deprecated. '
                'Use the `--num-secondary-workers` flag instead.')),
      type=int,
      hidden=True,
      help='The number of preemptible worker nodes in the cluster.')
  num_secondary_workers.add_argument(
      '--num-secondary-workers',
      type=int,
      help='The number of secondary worker nodes in the cluster.')

  parser.add_argument(
      '--master-machine-type',
      help='The type of machine to use for the master. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
      'server-specified.')
  image_parser = parser.add_mutually_exclusive_group()
  # TODO(b/73291743): Add external doc link to --image
  image_parser.add_argument(
      '--image',
      metavar='IMAGE',
      help='The custom image used to create the cluster. It can '
           'be the image name, the image URI, or the image family URI, which '
           'selects the latest image from the family.')
  image_parser.add_argument(
      '--image-version',
      metavar='VERSION',
      help='The image version to use for the cluster. Defaults to the '
      'latest version.')
  parser.add_argument(
      '--bucket',
      help="""\
      The Google Cloud Storage bucket to use by default to stage job
      dependencies, miscellaneous config files, and job driver console output
      when using this cluster.
      """)

  netparser = gce_platform_group.add_argument_group(mutex=True)
  netparser.add_argument(
      '--network',
      help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
  netparser.add_argument(
      '--subnet',
      help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-master-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the master in a cluster.')
  secondary_worker_local_ssds = parser.add_argument_group(mutex=True)
  secondary_worker_local_ssds.add_argument(
      '--num-preemptible-worker-local-ssds',
      type=int,
      hidden=True,
      action=actions.DeprecationAction(
          '--num-preemptible-worker-local-ssds',
          warn=('The `--num-preemptible-worker-local-ssds` flag is deprecated. '
                'Use the `--num-secondary-worker-local-ssds` flag instead.')),
      help="""\
      The number of local SSDs to attach to each secondary worker in
      a cluster.
      """)
  secondary_worker_local_ssds.add_argument(
      '--num-secondary-worker-local-ssds',
      type=int,
      help="""\
      The number of local SSDs to attach to each secondary worker in
      a cluster.
      """)
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(min_length=1),
      metavar='CLOUD_STORAGE_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help=('The maximum duration of each initialization action. See '
            '$ gcloud topic datetimes for information on duration formats.'))
  parser.add_argument(
      '--num-masters',
      type=arg_parsers.CustomFunctionValidator(
          lambda n: int(n) in [1, 3],
          'Number of masters must be 1 (Standard) or 3 (High Availability)',
          parser=arg_parsers.BoundedInt(1, 3)),
      help="""\
      The number of master nodes in the cluster.

      Number of Masters | Cluster Mode
      --- | ---
      1 | Standard
      3 | High Availability
      """)
  parser.add_argument(
      '--properties',
      type=arg_parsers.ArgDict(),
      action=arg_parsers.UpdateAction,
      default={},
      metavar='PREFIX:PROPERTY=VALUE',
      help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

Prefix | File | Purpose of file
--- | --- | ---
capacity-scheduler | capacity-scheduler.xml | Hadoop YARN Capacity Scheduler configuration
core | core-site.xml | Hadoop general configuration
distcp | distcp-default.xml | Hadoop Distributed Copy configuration
hadoop-env | hadoop-env.sh | Hadoop specific environment variables
hdfs | hdfs-site.xml | Hadoop HDFS configuration
hive | hive-site.xml | Hive configuration
mapred | mapred-site.xml | Hadoop MapReduce configuration
mapred-env | mapred-env.sh | Hadoop MapReduce specific environment variables
pig | pig.properties | Pig configuration
spark | spark-defaults.conf | Spark configuration
spark-env | spark-env.sh | Spark specific environment variables
yarn | yarn-site.xml | Hadoop YARN configuration
yarn-env | yarn-env.sh | Hadoop YARN specific environment variables

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
  gce_platform_group.add_argument(
      '--service-account',
      help='The Google Cloud IAM service account to be authenticated as.')
  gce_platform_group.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

  {minimum_scopes}

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

  {additional_scopes}

If you want to enable all scopes, use the 'cloud-platform' scope.

{scopes_help}
""".format(
    minimum_scopes='\n  '.join(constants.MINIMUM_SCOPE_URIS),
    additional_scopes='\n  '.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
    scopes_help=compute_helpers.SCOPES_HELP))

  if include_deprecated:
    _AddDiskArgsDeprecated(parser)
  else:
    _AddDiskArgs(parser)

  # --no-address is an exception to the no negative-flag style guideline to be
  # consistent with gcloud compute instances create --no-address
  parser.add_argument(
      '--no-address',
      action='store_true',
      help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

  boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
  parser.add_argument(
      '--master-boot-disk-type', help=boot_disk_type_detailed_help)
  parser.add_argument(
      '--worker-boot-disk-type', help=boot_disk_type_detailed_help)
  secondary_worker_boot_disk_type = parser.add_argument_group(mutex=True)
  secondary_worker_boot_disk_type.add_argument(
      '--preemptible-worker-boot-disk-type',
      help=boot_disk_type_detailed_help,
      hidden=True,
      action=actions.DeprecationAction(
          '--preemptible-worker-boot-disk-type',
          warn=('The `--preemptible-worker-boot-disk-type` flag is deprecated. '
                'Use the `--secondary-worker-boot-disk-type` flag instead.')))
  secondary_worker_boot_disk_type.add_argument(
      '--secondary-worker-boot-disk-type', help=boot_disk_type_detailed_help)

  autoscaling_group = parser.add_argument_group()
  flags.AddAutoscalingPolicyResourceArgForCluster(
      autoscaling_group, api_version=('v1beta2' if beta else 'v1'))

  if include_ttl_config:
    parser.add_argument(
        '--max-idle',
        type=arg_parsers.Duration(),
        help="""\
          The duration before cluster is auto-deleted after last job completes,
          such as "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

    auto_delete_group = parser.add_mutually_exclusive_group()
    auto_delete_group.add_argument(
        '--max-age',
        type=arg_parsers.Duration(),
        help="""\
          The lifespan of the cluster before it is auto-deleted, such as
          "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

    auto_delete_group.add_argument(
        '--expiration-time',
        type=arg_parsers.Datetime.Parse,
        help="""\
          The time when cluster will be auto-deleted, such as
          "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
          information on time formats.
          """)

  AddKerberosGroup(parser)

  flags.AddMinCpuPlatformArgs(parser)

  _AddAcceleratorArgs(parser)

  AddReservationAffinityGroup(
      gce_platform_group,
      group_text='Specifies the reservation for the instance.',
      affinity_text='The type of reservation for the instance.')
  if include_gke_platform_args:
    gke_based_cluster_group = platform_group.add_argument_group(
        hidden=True,
        help="""\
          Options for creating a GKE-based Dataproc cluster. Specifying any of these
          will indicate that this cluster is intended to be a GKE-based cluster.
          These options are mutually exclusive with GCE-based options.
          """)
    gke_based_cluster_group.add_argument(
        '--gke-cluster',
        hidden=True,
        help="""\
            Required for GKE-based clusters. Specify the name of the GKE cluster to
            deploy this GKE-based Dataproc cluster to. This should be the short name
            and not the full path name.
            """)
    gke_based_cluster_group.add_argument(
        '--gke-cluster-namespace',
        hidden=True,
        help="""\
            Optional. Specify the name of the namespace to deploy Dataproc system
            components into. This namespace does not need to already exist.
            """)

def AddCdnPolicyArgs(parser, resource_name, update_command=False):
  """Adds CDN policy args to the argparse.

  Covers cache mode, max/default/client TTL, custom response headers,
  negative caching and its policy, request coalescing, serve-while-stale,
  and bypass-cache-on-request-headers.
  """
  # TODO (b/165456063) document enums as lowercase-with-dash. Accept both forms.
  parser.add_argument(
      '--cache-mode',
      choices={
          'CACHE_ALL_STATIC':
              """Automatically cache static content, including common image
              formats, media (video and audio), web assets (JavaScript and CSS).
              Requests and responses that are marked as uncacheable, as well as
              dynamic content (including HTML), aren't cached.""",
          'USE_ORIGIN_HEADERS':
              """Require the origin to set valid caching headers to cache
              content. Responses without these headers aren't cached at
              Google's edge, and require a full trip to the origin on every
              request, potentially impacting performance and increasing load on
              the origin server.""",
          'FORCE_CACHE_ALL':
              """Cache all content, ignoring any "private", "no-store" or
              "no-cache" directives in Cache-Control response headers. Warning:
              this may result in Cloud CDN caching private, per-user (user
              identifiable) content. You should only enable this on backends
              that are not serving private or dynamic content, such as storage
              buckets."""
      },
      type=lambda x: x.replace('-', '_').upper(),
      default=None,
      help="""\
      Specifies the cache setting for all responses from this backend.
      """)
  client_ttl_help = """\
  Specifies a client (for example, browser) TTL that is separate from the TTL
  for Cloud CDN's edge caches.

  This allows you to set a shorter TTL for browsers/clients, and to have those
  clients revalidate content against Cloud CDN on a more regular basis, without
  requiring revalidation at the origin.

  The value of clientTtl cannot be set to a value greater than that of maxTtl,
  but can be equal.

  Any cacheable response has its max-age/s-maxage directives adjusted down to
  the client TTL value if necessary; an Expires header will be replaced with a
  suitable max-age directive.

  The maximum allowed value is 86400s (1 day).

  When creating a new backend with CACHE_ALL_STATIC and the field is unset, or
  when switching to that mode and the field is unset, a default value of 3600
  is used.

  When the cache mode is set to "USE_ORIGIN_HEADERS", you must omit this field.
  """
  client_ttl_group = parser.add_mutually_exclusive_group()
  client_ttl_group.add_argument(
      '--client-ttl',
      type=arg_parsers.Duration(upper_bound=86400),
      default=None,
      help=client_ttl_help,
  )
  if update_command:
    client_ttl_group.add_argument(
        '--no-client-ttl', action='store_true', help='Clears client TTL value.')
  default_ttl_help = """\
  Specifies the default TTL for cached content served by this origin for
  responses that do not have an existing valid TTL (max-age or s-maxage).

  The default value is 3600s for cache modes that allow a default TTL to be
  defined.

  The value of defaultTtl cannot be set to a value greater than that of maxTtl,
  but can be equal.

  When the cacheMode is set to FORCE_CACHE_ALL, the defaultTtl overwrites
  the TTL set in all responses.

  A TTL of "0" means "always revalidate".

  The maximum allowed value is 31,622,400s (1 year). Infrequently
  accessed objects may be evicted from the cache before the defined TTL.

  When creating a new backend with CACHE_ALL_STATIC or FORCE_CACHE_ALL and the
  field is unset, or when updating an existing backend to use these modes and
  the field is unset, a default value of 3600 is used. When the cache mode is
  set to "USE_ORIGIN_HEADERS", you must omit this field.
  """
  default_ttl_group = parser.add_mutually_exclusive_group()
  default_ttl_group.add_argument(
      '--default-ttl',
      type=arg_parsers.Duration(upper_bound=31622400),
      default=None,
      help=default_ttl_help,
  )
  if update_command:
    default_ttl_group.add_argument(
        '--no-default-ttl',
        action='store_true',
        help='Clears default TTL value.')
  max_ttl_help = """\
  Specifies the maximum allowed TTL for cached content served by this origin.

  The default value is 86400 for cache modes that support a max TTL.

  Cache directives that attempt to set a max-age or s-maxage higher than this,
  or an Expires header more than maxTtl seconds in the future, are capped at
  the value of maxTtl, as if it were the value of an s-maxage Cache-Control
  directive.

  A TTL of "0" means "always revalidate".

  The maximum allowed value is 31,622,400s (1 year). Infrequently
  accessed objects may be evicted from the cache before the defined TTL.

  When creating a new backend with CACHE_ALL_STATIC and the field is unset, or
  when updating an existing backend to use these modes and the field is unset,
  a default value of 86400 is used. When the cache mode is set to
  "USE_ORIGIN_HEADERS" or "FORCE_CACHE_ALL", you must omit this field.
  """
  max_ttl_group = parser.add_mutually_exclusive_group()
  max_ttl_group.add_argument(
      '--max-ttl',
      type=arg_parsers.Duration(upper_bound=31622400),
      default=None,
      help=max_ttl_help,
  )
  if update_command:
    max_ttl_group.add_argument(
        '--no-max-ttl', action='store_true', help='Clears max TTL value.')
  custom_response_header_help = """\
  Custom headers that the external HTTP(S) load balancer adds to proxied responses.
  For the list of headers, see [Creating custom headers](https://cloud.google.com/load-balancing/docs/custom-headers).

  Variables are not case-sensitive.
  """
  custom_response_header_group = parser.add_mutually_exclusive_group()
  custom_response_header_group.add_argument(
      '--custom-response-header',
      action='append',
      help=custom_response_header_help)
  if update_command:
    custom_response_header_group.add_argument(
        '--no-custom-response-headers',
        action='store_true',
        help='Remove all custom response headers for the %s.' % resource_name)

  negative_caching_help = """\
    Negative caching allows per-status code cache TTLs to be set, in order to
    apply fine-grained caching for common errors or redirects. This can reduce
    the load on your origin and improve the end-user experience by reducing response
    latency.

    Negative caching applies to a set of 3xx, 4xx, and 5xx status codes that are
    typically useful to cache.

    Status codes not listed here cannot have their TTL explicitly set and aren't
    cached, in order to avoid cache poisoning attacks.

    HTTP success codes (HTTP 2xx) are handled by the values of defaultTtl and
    maxTtl.

    When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, these
    values apply to responses with the specified response code that lack any
    `cache-control` or `expires` headers.

    When the cache mode is set to FORCE_CACHE_ALL, these values apply to all
    responses with the specified response code, and override any caching headers.

    Cloud CDN applies the following default TTLs to these status codes:
    - HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m
    - HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s
    - HTTP 405 (Method Not Allowed), 421 (Misdirected Request),
      501 (Not Implemented): 60s

    These defaults can be overridden in cdnPolicy.negativeCachingPolicy
    """
  negative_caching_group = parser.add_mutually_exclusive_group()
  if update_command:
    negative_caching_group.add_argument(
        '--negative-caching',
        action=arg_parsers.StoreTrueFalseAction,
        help=negative_caching_help)
  else:
    parser.add_argument(
        '--negative-caching',
        action=arg_parsers.StoreTrueFalseAction,
        help=negative_caching_help)
  negative_caching_policy_help = """\
    Sets a cache TTL for the specified HTTP status code.

    NegativeCaching must be enabled to configure the negativeCachingPolicy.

    If you omit the policy and leave negativeCaching enabled, Cloud CDN's default
    cache TTLs are used.

    Note that when specifying an explicit negative caching policy, make sure that
    you specify a cache TTL for all response codes that you want to cache. Cloud
    CDN doesn't apply any default negative caching when a policy exists.

    *CODE* is the HTTP status code to define a TTL against. Only HTTP status codes
    300, 301, 308, 404, 405, 410, 421, 451, and 501 can be specified as values,
    and you cannot specify a status code more than once.

    *TTL* is the time to live (in seconds) for which to cache responses for the
    specified *CODE*. The maximum allowed value is 1800s (30 minutes); note that
    infrequently accessed objects may be evicted from the cache before the defined TTL.
    """
  negative_caching_group.add_argument(
      '--negative-caching-policy',
      type=arg_parsers.ArgDict(key_type=int, value_type=int),
      metavar='[CODE=TTL]',
      help=negative_caching_policy_help)
  if update_command:
    negative_caching_group.add_argument(
        '--no-negative-caching-policies',
        action='store_true',
        help='Remove all negative caching policies for the %s.' % resource_name)

  serve_while_stale_help = """\
  Serve existing content from the cache (if available) when revalidating
  content with the origin; this allows content to be served more quickly, and
  also allows content to continue to be served if the backend is down or
  reporting errors.

  This setting defines the default serve-stale duration for any cached responses
  that do not specify a stale-while-revalidate directive. Stale responses that
  exceed the TTL configured here will not be served without first being
  revalidated with the origin. The default limit is 86400s (1 day), which will
  allow stale content to be served up to this limit beyond the max-age
  (or s-max-age) of a cached response.

  The maximum allowed value is 604800s (1 week).

  Set this to zero (0) to disable serve-while-stale.
  """
  serve_while_stale_group = parser.add_mutually_exclusive_group()
  serve_while_stale_group.add_argument(
      '--serve-while-stale',
      type=arg_parsers.Duration(upper_bound=604800),
      default=None,
      help=serve_while_stale_help,
  )
  if update_command:
    serve_while_stale_group.add_argument(
        '--no-serve-while-stale',
        action='store_true',
        help='Clears serve while stale value.')
  bypass_cache_on_request_headers_help = """\
  Bypass the cache when the specified request headers are matched (for example,
  Pragma or Authorization headers). Up to 5 headers can be specified.

  The cache is bypassed for all cdnPolicy.cacheMode settings.

  Note that requests that include these headers will always fill from origin,
  and may result in a large number of cache misses if the specified headers are
  common to many requests.

  Values are case-insensitive.

  The header name must be a valid HTTP header field token (per RFC 7230).

  For the list of restricted headers, see the list of required header name
  properties in [How custom headers work](https://cloud.google.com/load-balancing/docs/custom-headers#how_custom_headers_work).

  A header name must not appear more than once in the list of added headers.
  """
  bypass_cache_on_request_headers_group = parser.add_mutually_exclusive_group()
  bypass_cache_on_request_headers_group.add_argument(
      '--bypass-cache-on-request-headers',
      action='append',
      help=bypass_cache_on_request_headers_help)
  if update_command:
    bypass_cache_on_request_headers_group.add_argument(
        '--no-bypass-cache-on-request-headers',
        action='store_true',
        help='Remove all bypass cache on request headers for the %s.' %
        resource_name)
  request_coalescing_help = """\
  Enables request coalescing to the backend (recommended).

  Request coalescing (or collapsing) combines multiple concurrent cache fill
  requests into a small number of requests to the origin. This can improve
  performance by putting less load on the origin and backend infrastructure.
  However, coalescing adds a small amount of latency when multiple requests to
  the same URL are processed, so for latency-critical applications it may not
  be desirable.

  Defaults to true.
  """
  parser.add_argument(
      '--request-coalescing',
      action=arg_parsers.StoreTrueFalseAction,
      help=request_coalescing_help)
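
The --cache-mode normalizer above (type=lambda x: x.replace('-', '_').upper())
is what lets callers pass either cache-all-static or CACHE_ALL_STATIC,
addressing the TODO at the top of the function: argparse applies type
conversion before validating choices, so the lowercase-with-dash form is
mapped onto the enum-style choice. A standalone sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--cache-mode',
    choices=['CACHE_ALL_STATIC', 'USE_ORIGIN_HEADERS', 'FORCE_CACHE_ALL'],
    type=lambda x: x.replace('-', '_').upper())

# Both spellings land on the same normalized value.
assert parser.parse_args(
    ['--cache-mode', 'cache-all-static']).cache_mode == 'CACHE_ALL_STATIC'
assert parser.parse_args(
    ['--cache-mode', 'FORCE_CACHE_ALL']).cache_mode == 'FORCE_CACHE_ALL'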
Example #15
def ParseExpirationPeriodWithNeverSentinel(value):
    if value == subscriptions.NEVER_EXPIRATION_PERIOD_VALUE:
        return value
    return util.FormatDuration(arg_parsers.Duration()(value))
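
A sketch of the sentinel pass-through above: the sentinel (assumed here to be
the literal string 'never', standing in for
subscriptions.NEVER_EXPIRATION_PERIOD_VALUE) skips duration parsing entirely,
while any other value is normalized to seconds, approximating
util.FormatDuration(arg_parsers.Duration()(value)):

NEVER = 'never'  # assumed sentinel value, for illustration


def parse_expiration_period(value):
  if value == NEVER:
    return value
  units = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
  return '%ds' % (int(value[:-1]) * units[value[-1]])


assert parse_expiration_period('never') == 'never'
assert parse_expiration_period('1d') == '86400s'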
Example #16
    def Args(cls, parser):
        parser.display_info.AddFormat(flags.DEFAULT_LIST_FORMAT)
        cls.HTTPS_HEALTH_CHECKS_ARG = flags.HttpsHealthCheckArgument()
        cls.HTTPS_HEALTH_CHECKS_ARG.AddArgument(parser,
                                                operation_type='create')
        parser.display_info.AddCacheUpdater(
            completers.HttpsHealthChecksCompleter)

        parser.add_argument('--host',
                            help="""\
        The value of the host header used in this HTTPS health check request.
        By default, this is empty and Google Compute Engine automatically sets
        the host header in health requests to the same external IP address as
        the forwarding rule associated with the target pool.
        """)

        parser.add_argument('--port',
                            type=int,
                            default=443,
                            help="""\
        The TCP port number that this health check monitors. The default value
        is 443.
        """)

        parser.add_argument('--request-path',
                            default='/',
                            help="""\
        The request path that this health check monitors. For example,
        ``/healthcheck''. The default value is ``/''.
        """)

        parser.add_argument('--check-interval',
                            type=arg_parsers.Duration(),
                            default='5s',
                            help="""\
        How often to perform a health check for an instance. For example,
        specifying ``10s'' will run the check every 10 seconds. The default
        value is ``5s''.
        See $ gcloud topic datetimes for information on duration formats.
        """)

        parser.add_argument('--timeout',
                            type=arg_parsers.Duration(),
                            default='5s',
                            help="""\
        If Google Compute Engine doesn't receive an HTTPS 200 response from the
        instance by the time specified by the value of this flag, the health
        check request is considered a failure. For example, specifying ``10s''
        will cause the check to wait for 10 seconds before considering the
        request a failure. The default value is ``5s''.
        See $ gcloud topic datetimes for information on duration formats.
        """)

        parser.add_argument('--unhealthy-threshold',
                            type=int,
                            default=2,
                            help="""\
        The number of consecutive health check failures before a healthy
        instance is marked as unhealthy. The default is 2.
        """)

        parser.add_argument('--healthy-threshold',
                            type=int,
                            default=2,
                            help="""\
        The number of consecutive successful health checks before an
        unhealthy instance is marked as healthy. The default is 2.
        """)

        parser.add_argument(
            '--description',
            help='An optional, textual description for the HTTPS health check.'
        )
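
One subtlety in the flags above: --check-interval and --timeout pass string
defaults ('5s') together with a type callable. argparse applies type
conversion to string defaults, so the default goes through Duration() exactly
as a user-supplied value would. A minimal demonstration with a toy seconds
parser standing in for arg_parsers.Duration:

import argparse


def seconds(value):
  units = {'s': 1, 'm': 60}
  return int(value[:-1]) * units[value[-1]]


parser = argparse.ArgumentParser()
parser.add_argument('--check-interval', type=seconds, default='5s')

assert parser.parse_args([]).check_interval == 5
assert parser.parse_args(['--check-interval', '10s']).check_interval == 10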
Example #17
def AddMinReadyArg(parser):
    parser.add_argument(
        '--min-ready',
        type=arg_parsers.Duration(lower_bound='0s'),
        help=('Minimum time for which a newly created instance '
              'should be ready to be considered available.'))
Example #18
def AddUpdatableArgs(parser,
                     compute_messages,
                     default_protocol='HTTP',
                     default_timeout='30s'):
  """Adds top-level backend service arguments that can be updated."""
  parser.add_argument(
      '--description',
      help='An optional, textual description for the backend service.')

  http_health_checks = parser.add_argument(
      '--http-health-checks',
      type=arg_parsers.ArgList(min_length=1),
      metavar='HTTP_HEALTH_CHECK',
      action=arg_parsers.FloatingListValuesCatcher(),
      help=('Specifies a list of HTTP health check objects for checking the '
            'health of the backend service.'))
  http_health_checks.detailed_help = """\
      Specifies a list of HTTP health check objects for checking the health
      of the backend service.
      """

  https_health_checks = parser.add_argument(
      '--https-health-checks',
      type=arg_parsers.ArgList(min_length=1),
      metavar='HTTPS_HEALTH_CHECK',
      action=arg_parsers.FloatingListValuesCatcher(),
      help=('Specifies a list of HTTPS health check objects for checking the '
            'health of the backend service.'))
  https_health_checks.detailed_help = """\
      Specifies a list of HTTPS health check objects for checking the health
      of the backend service.
      """

  timeout = parser.add_argument(
      '--timeout',
      default=default_timeout,
      type=arg_parsers.Duration(),
      help=('The amount of time to wait for a backend to respond to a '
            'request before considering the request failed.'))
  timeout.detailed_help = """\
      The amount of time to wait for a backend to respond to a request
      before considering the request failed. For example, specifying
      ``10s'' will give backends 10 seconds to respond to
      requests. Valid units for this flag are ``s'' for seconds, ``m''
      for minutes, and ``h'' for hours.
      """
  # TODO(user): Remove port once port_name is in use. b/16486110
  parser.add_argument(
      '--port',
      type=int,
      help=('The TCP port to use when connecting to the backend. '
            '--port is being deprecated in favor of --port-name.'))

  port_name = parser.add_argument(
      '--port-name',
      help=('A user-defined port name used to resolve which port to use on '
            'each backend.'))
  port_name.detailed_help = """\
      The name of a service that has been added to an instance group
      in this backend. Instance group services map a name to a port
      number which is used by the load balancing service.
      Only one ``port-name'' may be added to a backend service, and that
      name must exist as a service on all instance groups that are a
      part of this backend service. The port number associated with the
      name may differ between instances. If you do not specify
      this flag, your instance groups must have a service named ``http''
      configured. See also
      `gcloud compute instance-groups set-named-ports --help`.
      """

  parser.add_argument(
      '--protocol',
      choices=ProtocolOptions(compute_messages.BackendService),
      default=default_protocol,
      type=lambda x: x.upper(),
      help='The protocol for incoming requests.')
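
The detailed_help assignments above work because argparse's add_argument
returns the Action object it creates; older gcloud code hung the longer help
text off that object for its help generator to pick up. A minimal
illustration of the pattern:

import argparse

parser = argparse.ArgumentParser()
timeout = parser.add_argument('--timeout', default='30s')
# Action objects accept arbitrary attributes, so extra metadata can ride along.
timeout.detailed_help = 'Longer help text rendered by the gcloud help system.'

assert parser.parse_args([]).timeout == '30s'
assert timeout.detailed_help.startswith('Longer')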
def AddAutoscalerArgs(parser,
                      multizonal_enabled=False,
                      queue_scaling_enabled=False):
    """Adds commandline arguments to parser."""
    parser.add_argument(
        'name',
        metavar='NAME',
        completion_resource='compute.instanceGroupManagers',
        help='Managed instance group whose autoscaling parameters will be set.'
    )
    parser.add_argument('--cool-down-period',
                        type=arg_parsers.Duration(),
                        help='Number of seconds the Autoscaler should wait '
                        'between resizing the collection.')
    parser.add_argument('--description', help='Notes about Autoscaler.')
    parser.add_argument('--min-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxint),
                        help='Minimum number of replicas Autoscaler will set.')
    parser.add_argument('--max-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxint),
                        required=True,
                        help='Maximum number of replicas Autoscaler will set.')
    parser.add_argument('--scale-based-on-cpu',
                        action='store_true',
                        help='Use autoscaling based on cpu utilization.')
    parser.add_argument('--scale-based-on-load-balancing',
                        action='store_true',
                        help=('Use autoscaling based on load balancing '
                              'utilization.'))
    parser.add_argument('--target-cpu-utilization',
                        type=arg_parsers.BoundedFloat(0.0, 1.0),
                        help='CPU utilization level Autoscaler will aim to '
                        'maintain (0.0 to 1.0).')
    parser.add_argument(
        '--target-load-balancing-utilization',
        type=arg_parsers.BoundedFloat(0.0, None),
        help='Load balancing utilization level Autoscaler will '
        'aim to maintain (greater than 0.0).')
    custom_metric_utilization = parser.add_argument(
        '--custom-metric-utilization',
        type=arg_parsers.ArgDict(spec={
            'metric': str,
            'utilization-target': float,
            'utilization-target-type': str,
        }, ),
        # pylint:disable=protected-access
        action=arg_parsers.FloatingListValuesCatcher(argparse._AppendAction),
        help=(
            'Adds target value of a Google Cloud Monitoring metric Autoscaler '
            'will aim to maintain.'),
        metavar='PROPERTY=VALUE',
    )
    custom_metric_utilization.detailed_help = """
   Adds a target metric value for the Autoscaler.

   *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

   *utilization-target*::: Value of the metric Autoscaler will aim to maintain
   (greater than 0.0).

   *utilization-target-type*::: How target is expressed. Valid values: {0}.
  """.format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))

    if queue_scaling_enabled:
        cloud_pub_sub_spec = parser.add_argument(
            '--queue-scaling-cloud-pub-sub',
            type=arg_parsers.ArgDict(spec={
                'topic': str,
                'subscription': str,
            }, ),
            help='Scaling based on Cloud Pub/Sub queuing system.',
            metavar='PROPERTY=VALUE',
        )
        cloud_pub_sub_spec.detailed_help = """
     Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
     Both topic and subscription are required.

     *topic*::: Topic specification. Can be just a name or a partial URL
     (starting with "projects/..."). Topic must belong to the same project as
     Autoscaler.

     *subscription*::: Subscription specification. Can be just a name or a
     partial URL (starting with "projects/..."). Subscription must belong to the
     same project as Autoscaler and must be connected to the specified topic.
    """
        parser.add_argument(
            '--queue-scaling-acceptable-backlog-per-instance',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Queue-based scaling target: the autoscaler will aim '
            'to ensure that the average number of tasks in the queue '
            'is no greater than this value.',
        )
        parser.add_argument(
            '--queue-scaling-single-worker-throughput',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Hints the autoscaler, for queue-based scaling, at '
            'how much throughput a single worker instance is able '
            'to consume.')
    if multizonal_enabled:
        scope_parser = parser.add_mutually_exclusive_group()
        flags.AddRegionFlag(
            scope_parser,
            resource_type='resources',
            operation_type='update',
            explanation=flags.REGION_PROPERTY_EXPLANATION_NO_DEFAULT)
        flags.AddZoneFlag(
            scope_parser,
            resource_type='resources',
            operation_type='update',
            explanation=flags.ZONE_PROPERTY_EXPLANATION_NO_DEFAULT)
    else:
        flags.AddZoneFlag(parser,
                          resource_type='resources',
                          operation_type='update')
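
The --queue-scaling-cloud-pub-sub flag above relies on arg_parsers.ArgDict
with a spec: roughly, the value is split on commas, each pair on '=', and the
spec's type is applied per key. A simplified stand-in (the real ArgDict also
handles escaping, required keys, and error reporting):

def arg_dict(value, spec):
  result = {}
  for pair in value.split(','):
    key, _, raw = pair.partition('=')
    if key not in spec:
      raise ValueError('unknown key: %r' % key)
    result[key] = spec[key](raw)
  return result


parsed = arg_dict('topic=my-topic,subscription=my-sub',
                  {'topic': str, 'subscription': str})
assert parsed == {'topic': 'my-topic', 'subscription': 'my-sub'}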
Example #20
    def Args(cls, parser):
        cls.HTTP_HEALTH_CHECKS_ARG = flags.HttpHealthCheckArgument()
        cls.HTTP_HEALTH_CHECKS_ARG.AddArgument(parser, operation_type='update')

        parser.add_argument('--host',
                            help="""\
        The value of the host header used in this HTTP health check request.
        By default, this is empty and Google Compute Engine automatically sets
        the host header in health requests to the same external IP address as
        the forwarding rule associated with the target pool. Setting this to
        an empty string will clear any existing host value.
        """)

        parser.add_argument('--port',
                            type=int,
                            help="""\
        The TCP port number that this health check monitors.
        """)

        parser.add_argument('--request-path',
                            help="""\
        The request path that this health check monitors. For example,
        ``/healthcheck''.
        """)

        parser.add_argument('--check-interval',
                            type=arg_parsers.Duration(),
                            help="""\
        How often to perform a health check for an instance. For example,
        specifying ``10s'' will run the check every 10 seconds.
        See $ gcloud topic datetimes for information on duration formats.
        """)

        parser.add_argument('--timeout',
                            type=arg_parsers.Duration(),
                            help="""\
        If Google Compute Engine doesn't receive an HTTP 200 response from the
        instance by the time specified by the value of this flag, the health
        check request is considered a failure. For example, specifying ``10s''
        will cause the check to wait for 10 seconds before considering the
        request a failure.  Valid units for this flag are ``s'' for seconds and
        ``m'' for minutes.
        """)

        parser.add_argument('--unhealthy-threshold',
                            type=int,
                            help="""\
        The number of consecutive health check failures before a healthy
        instance is marked as unhealthy.
        """)

        parser.add_argument('--healthy-threshold',
                            type=int,
                            help="""\
        The number of consecutive successful health checks before an
        unhealthy instance is marked as healthy.
        """)

        parser.add_argument(
            '--description',
            help=(
                'A textual description for the HTTP health check. Pass in an '
                'empty string to unset.'))
Example #21
def _CommonArgs(parser):
    """Register flags common to all tracks."""
    instances_flags.AddTagsArgs(parser)
    base.ASYNC_FLAG.AddToParser(parser)
    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')
    parser.add_argument('name', help='The name of this cluster.')
    parser.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')
    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    parser.add_argument('--image', help=argparse.SUPPRESS)
    parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    netparser = parser.add_mutually_exclusive_group()
    network = netparser.add_argument(
        '--network',
        help='Specifies the network that the cluster will be part of.')
    network.detailed_help = """\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """
    subnet = netparser.add_argument(
        '--subnet',
        help='Specifies the subnet that the cluster will be part of.')
    subnet.detailed_help = """\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """
    parser.add_argument(
        '--zone',
        '-z',
        help='The compute zone (e.g. us-central1-a) for the cluster.',
        action=actions.StoreProperty(properties.VALUES.compute.zone))
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help='The maximum duration of each initialization action.')
    properties_parser = parser.add_argument(
        '--properties',
        type=arg_parsers.ArgDict(),
        metavar='PREFIX:PROPERTY=VALUE',
        default={},
        help='Specifies cluster configuration properties.')
    properties_parser.detailed_help = """\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========

"""
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    scope_parser = parser.add_argument(
        '--scopes',
        type=arg_parsers.ArgList(min_length=1),
        metavar='SCOPE',
        help="Specifies scopes for the node instances. The project's default "
        'service account is used.')
    scope_parser.detailed_help = """\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes, which are necessary for the cluster to function
properly, are always added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified, the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes, use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP)

    master_boot_disk = parser.add_mutually_exclusive_group()
    worker_boot_disk = parser.add_mutually_exclusive_group()

    # Deprecated, to be removed at a future date.
    master_boot_disk.add_argument('--master-boot-disk-size-gb',
                                  type=int,
                                  help=argparse.SUPPRESS)
    worker_boot_disk.add_argument('--worker-boot-disk-size-gb',
                                  type=int,
                                  help=argparse.SUPPRESS)

    boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
    master_boot_disk_size = master_boot_disk.add_argument(
        '--master-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help='The size of the boot disk of the master in a cluster.')
    master_boot_disk_size.detailed_help = boot_disk_size_detailed_help
    worker_boot_disk_size = worker_boot_disk.add_argument(
        '--worker-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help='The size of the boot disk of each worker in a cluster.')
    worker_boot_disk_size.detailed_help = boot_disk_size_detailed_help

    preemptible_worker_boot_disk_size = parser.add_argument(
        '--preemptible-worker-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help='The size of the boot disk of each preemptible worker in a '
        'cluster.')
    preemptible_worker_boot_disk_size.detailed_help = (
        boot_disk_size_detailed_help)
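
The boot-disk size flags above parse values like '10GB' with
arg_parsers.BinarySize(lower_bound='10GB'). A rough stand-in that scales by
binary units (the 2**10-based scaling is an assumption for illustration) and
enforces the lower bound:

import argparse


def binary_size_min_10gb(value):
  units = {'KB': 2**10, 'MB': 2**20, 'GB': 2**30, 'TB': 2**40}
  for suffix, scale in units.items():
    if value.endswith(suffix):
      size = int(value[:-len(suffix)]) * scale
      break
  else:
    raise argparse.ArgumentTypeError('bad size: %r' % value)
  if size < 10 * 2**30:
    raise argparse.ArgumentTypeError('minimum boot disk size is 10GB')
  return size


assert binary_size_min_10gb('10GB') == 10 * 2**30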
Example #22
    def Args(cls, parser):
        dataproc = dp.Dataproc(cls.ReleaseTrack())
        base.ASYNC_FLAG.AddToParser(parser)
        # Allow the user to specify new labels as well as update/remove existing
        labels_util.AddUpdateLabelsFlags(parser)
        # Updates can take hours if a lot of data needs to be moved on HDFS
        flags.AddTimeoutFlag(parser, default='3h')
        flags.AddClusterResourceArg(parser, 'update', dataproc.api_version)
        parser.add_argument(
            '--num-workers',
            type=int,
            help='The new number of worker nodes in the cluster.')
        parser.add_argument(
            '--num-preemptible-workers',
            type=int,
            help='The new number of preemptible worker nodes in the cluster.')

        parser.add_argument('--graceful-decommission-timeout',
                            type=arg_parsers.Duration(lower_bound='0s',
                                                      upper_bound='1d'),
                            help="""
              The graceful decommission timeout for decommissioning Node Managers
              in the cluster, used when removing nodes. Graceful decommissioning
              allows removing nodes from the cluster without interrupting jobs in
              progress. Timeout specifies how long to wait for jobs in progress to
              finish before forcefully removing nodes (and potentially
              interrupting jobs). Timeout defaults to 0 if not set (for forceful
              decommission), and the maximum allowed timeout is 1 day.
              See $ gcloud topic datetimes for information on duration formats.
              """)

        idle_delete_group = parser.add_mutually_exclusive_group()
        idle_delete_group.add_argument('--max-idle',
                                       type=arg_parsers.Duration(),
                                       help="""\
        The duration before the cluster is auto-deleted after the last job finishes,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)
        idle_delete_group.add_argument('--no-max-idle',
                                       action='store_true',
                                       help="""\
        Cancels the cluster auto-deletion by cluster idle duration (configured
        by the --max-idle flag).
        """)

        auto_delete_group = parser.add_mutually_exclusive_group()
        auto_delete_group.add_argument('--max-age',
                                       type=arg_parsers.Duration(),
                                       help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)
        auto_delete_group.add_argument('--expiration-time',
                                       type=arg_parsers.Datetime.Parse,
                                       help="""\
        The time when the cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z". See $ gcloud topic datetimes for
        information on time formats.
        """)
        auto_delete_group.add_argument('--no-max-age',
                                       action='store_true',
                                       help="""\
        Cancels the cluster auto-deletion by maximum cluster age (configured by
        the --max-age or --expiration-time flags).
        """)

        # Can only specify one of --autoscaling-policy or --disable-autoscaling
        autoscaling_group = parser.add_mutually_exclusive_group()
        flags.AddAutoscalingPolicyResourceArgForCluster(autoscaling_group,
                                                        api_version='v1')
        autoscaling_group.add_argument('--disable-autoscaling',
                                       action='store_true',
                                       help="""\
        Disable autoscaling, if it is enabled. This is an alias for passing the
        empty string to `--autoscaling-policy`.
        """)

    def Args(parser):
        flags.AddTemplateResourceArg(parser, 'set managed cluster')
        parser.add_argument('--cluster-name',
                            help="""\
        The name of the managed Dataproc cluster.
        If unspecified, the workflow template ID will be used.""")
        clusters.ArgsForClusterRef(parser, beta=True)
        flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)

        # TODO(b/70164645): Consolidate these arguments with the other beta args
        # These arguments are duplicated from the cluster creation beta track.
        # There should be an ArgsForClusterRefBeta method in clusters.py
        # that is invoked here so that we don't have to duplicate the arguments.
        parser.add_argument('--max-idle',
                            type=arg_parsers.Duration(),
                            help="""\
        The duration before cluster is auto-deleted after last job completes,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

        auto_delete_group = parser.add_mutually_exclusive_group()
        auto_delete_group.add_argument('--max-age',
                                       type=arg_parsers.Duration(),
                                       help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

        auto_delete_group.add_argument('--expiration-time',
                                       type=arg_parsers.Datetime.Parse,
                                       help="""\
        The time when the cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z". See $ gcloud topic datetimes for
        information on time formats.
        """)

        for instance_type in ('master', 'worker'):
            help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
            if instance_type == 'worker':
                help_msg += """
      Note:
      No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
            help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for NVIDIA Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
            parser.add_argument('--{0}-accelerator'.format(instance_type),
                                type=arg_parsers.ArgDict(spec={
                                    'type': str,
                                    'count': int,
                                }),
                                metavar='type=TYPE,[count=COUNT]',
                                help=help_msg)
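
The accelerator flags above use the same ArgDict machinery with a mixed spec
({'type': str, 'count': int}). A sketch of the resulting parse; the
count-defaults-to-1 behavior described in the help text is applied explicitly
here (an assumption about where the defaulting happens, for illustration):

def parse_accelerator(value):
  spec = {'type': str, 'count': int}
  result = {'count': 1}  # default per the help text above
  for pair in value.split(','):
    key, _, raw = pair.partition('=')
    result[key] = spec[key](raw)
  return result


assert parse_accelerator('type=nvidia-tesla-k80,count=2') == {
    'type': 'nvidia-tesla-k80', 'count': 2}
assert parse_accelerator('type=nvidia-tesla-k80')['count'] == 1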
Example #24
def ArgsForClusterRef(parser, beta=False):
  """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
  """
  labels_util.AddCreateLabelsFlags(parser)
  instances_flags.AddTagsArgs(parser)
  # 30m is backend timeout + 5m for safety buffer.
  flags.AddTimeoutFlag(parser, default='35m')
  flags.AddZoneFlag(parser)

  parser.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      default=None,
      help=('Metadata to be made available to the guest operating system '
            'running on the instances'),
      metavar='KEY=VALUE')

  # Either allow creating a single node cluster (--single-node), or specifying
  # the number of workers in the multi-node cluster (--num-workers and
  # --num-preemptible-workers)
  node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
  node_group.add_argument(
      '--single-node',
      action='store_true',
      help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
  # Not mutually exclusive
  worker_group = node_group.add_argument_group(help='Multi-node cluster flags')
  worker_group.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
      'server-specified.')
  worker_group.add_argument(
      '--num-preemptible-workers',
      type=int,
      help='The number of preemptible worker nodes in the cluster.')

  parser.add_argument(
      '--master-machine-type',
      help='The type of machine to use for the master. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
      'server-specified.')
  if beta:
    image_parser = parser.add_mutually_exclusive_group()
    # TODO(b/73291743): Add external doc link to --image
    image_parser.add_argument(
        '--image',
        metavar='IMAGE',
        help='The full custom image URI or the custom image name that '
        'will be used to create a cluster.')
    image_parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
  else:
    parser.add_argument(
        '--image',
        hidden=True,
        help='The full image URI to use with the cluster. Overrides '
        '--image-version.')
    parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')

  parser.add_argument(
      '--bucket',
      help='The Google Cloud Storage bucket to use with the Google Cloud '
      'Storage connector. A bucket is auto created when this parameter is '
      'not specified.')

  netparser = parser.add_mutually_exclusive_group()
  netparser.add_argument(
      '--network',
      help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
  netparser.add_argument(
      '--subnet',
      help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-master-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the master in a cluster.')
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(min_length=1),
      metavar='CLOUD_STORAGE_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help=('The maximum duration of each initialization action. See '
            '$ gcloud topic datetimes for information on duration formats.'))
  parser.add_argument(
      '--num-masters',
      type=arg_parsers.CustomFunctionValidator(
          lambda n: int(n) in [1, 3],
          'Number of masters must be 1 (Standard) or 3 (High Availability)',
          parser=arg_parsers.BoundedInt(1, 3)),
      help="""\
      The number of master nodes in the cluster.

      [format="csv",options="header"]
      |========
      Number of Masters,Cluster Mode
      1,Standard
      3,High Availability
      |========
      """)
  parser.add_argument(
      '--properties',
      type=arg_parsers.ArgDict(),
      metavar='PREFIX:PROPERTY=VALUE',
      default={},
      help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,File,Purpose of file
capacity-scheduler,capacity-scheduler.xml,Hadoop YARN Capacity Scheduler configuration
core,core-site.xml,Hadoop general configuration
distcp,distcp-default.xml,Hadoop Distributed Copy configuration
hadoop-env,hadoop-env.sh,Hadoop specific environment variables
hdfs,hdfs-site.xml,Hadoop HDFS configuration
hive,hive-site.xml,Hive configuration
mapred,mapred-site.xml,Hadoop MapReduce configuration
mapred-env,mapred-env.sh,Hadoop MapReduce specific environment variables
pig,pig.properties,Pig configuration
spark,spark-defaults.conf,Spark configuration
spark-env,spark-env.sh,Spark specific environment variables
yarn,yarn-site.xml,Hadoop YARN configuration
yarn-env,yarn-env.sh,Hadoop YARN specific environment variables
|========

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
  parser.add_argument(
      '--service-account',
      help='The Google Cloud IAM service account to be authenticated as.')
  parser.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes, which are necessary for the cluster to function
properly, are always added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified, the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes, use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(
    minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
    additional_scopes='\n'.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
    aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP,
    scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))

  master_boot_disk_size = parser.add_mutually_exclusive_group()
  worker_boot_disk_size = parser.add_mutually_exclusive_group()

  # Deprecated, to be removed at a future date.
  master_boot_disk_size.add_argument(
      '--master-boot-disk-size-gb',
      action=actions.DeprecationAction(
          '--master-boot-disk-size-gb',
          warn=('The `--master-boot-disk-size-gb` flag is deprecated. '
                'Use `--master-boot-disk-size` flag with "GB" after value.')),
      type=int,
      hidden=True,
      help='Use `--master-boot-disk-size` flag with "GB" after value.')
  worker_boot_disk_size.add_argument(
      '--worker-boot-disk-size-gb',
      action=actions.DeprecationAction(
          '--worker-boot-disk-size-gb',
          warn=('The `--worker-boot-disk-size-gb` flag is deprecated. '
                'Use `--worker-boot-disk-size` flag with "GB" after value.')),
      type=int,
      hidden=True,
      help='Use `--worker-boot-disk-size` flag with "GB" after value.')

  boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
  master_boot_disk_size.add_argument(
      '--master-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)
  worker_boot_disk_size.add_argument(
      '--worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)
  parser.add_argument(
      '--preemptible-worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)

  # Args that are visible only in Beta track
  parser.add_argument(
      '--no-address',
      action='store_true',
      help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """,
      hidden=not beta)

  if beta:
    boot_disk_type_detailed_help = """\
        The type of the boot disk. The value must be ``pd-standard'' or
        ``pd-ssd''.
        """
    parser.add_argument(
        '--master-boot-disk-type', help=boot_disk_type_detailed_help)
    parser.add_argument(
        '--worker-boot-disk-type', help=boot_disk_type_detailed_help)
    parser.add_argument(
        '--preemptible-worker-boot-disk-type',
        help=boot_disk_type_detailed_help)
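
The --num-masters flag earlier in this snippet composes BoundedInt(1, 3) with
a predicate via arg_parsers.CustomFunctionValidator; the net effect is: parse
an int, enforce the bounds, then reject 2. A plain-argparse approximation:

import argparse


def num_masters(value):
  n = int(value)
  if not 1 <= n <= 3:
    raise argparse.ArgumentTypeError('must be between 1 and 3')
  if n not in (1, 3):
    raise argparse.ArgumentTypeError(
        'Number of masters must be 1 (Standard) or 3 (High Availability)')
  return n


assert num_masters('3') == 3
# num_masters('2') raises ArgumentTypeError.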
Example #25
def ArgsForClusterRef(parser, beta=False, include_deprecated=True):
    # pylint: disable=unused-argument
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    flags.AddZoneFlag(parser, short_flags=include_deprecated)

    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    image_parser = parser.add_mutually_exclusive_group()
    # TODO(b/73291743): Add external doc link to --image
    image_parser.add_argument(
        '--image',
        metavar='IMAGE',
        help='The full custom image URI or the custom image name that '
        'will be used to create a cluster.')
    image_parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
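    # Only 1 (Standard) or 3 (High Availability) masters are accepted; any
    # other value fails the CustomFunctionValidator check below.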
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      Number of Masters | Cluster Mode
      --- | ---
      1 | Standard
      3 | High Availability
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

Prefix | File | Purpose of file
--- | --- | ---
capacity-scheduler | capacity-scheduler.xml | Hadoop YARN Capacity Scheduler configuration
core | core-site.xml | Hadoop general configuration
distcp | distcp-default.xml | Hadoop Distributed Copy configuration
hadoop-env | hadoop-env.sh | Hadoop specific environment variables
hdfs | hdfs-site.xml | Hadoop HDFS configuration
hive | hive-site.xml | Hive configuration
mapred | mapred-site.xml | Hadoop MapReduce configuration
mapred-env | mapred-env.sh | Hadoop MapReduce specific environment variables
pig | pig.properties | Pig configuration
spark | spark-defaults.conf | Spark configuration
spark-env | spark-env.sh | Spark specific environment variables
yarn | yarn-site.xml | Hadoop YARN configuration
yarn-env | yarn-env.sh | Hadoop YARN specific environment variables

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

  {minimum_scopes}

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

  {additional_scopes}

If you want to enable all scopes, use the 'cloud-platform' scope.

{scopes_help}
""".format(minimum_scopes='\n  '.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n  '.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           scopes_help=compute_helpers.SCOPES_HELP))

    if include_deprecated:
        _AddDiskArgsDeprecated(parser)
    else:
        _AddDiskArgs(parser)

    # --no-address is an exception to the no negative-flag style guideline to be
    # consistent with gcloud compute instances create --no-address
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted, the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

    boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
    parser.add_argument('--master-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
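The --properties flag above relies on arg_parsers.ArgDict to collect
PREFIX:PROPERTY=VALUE pairs into one flat dict. A minimal stand-alone sketch
of how such a dict could be regrouped per configuration file; the
group_properties helper is hypothetical and not part of the SDK:

def group_properties(properties):
    # Hypothetical helper, for illustration only: splits each
    # 'prefix:property' key and groups values by configuration-file prefix.
    grouped = {}
    for key, value in properties.items():
        prefix, _, name = key.partition(':')
        grouped.setdefault(prefix, {})[name] = value
    return grouped

assert group_properties({
    'core:io.serializations': 'x',
    'spark:spark.executor.cores': '2',
}) == {'core': {'io.serializations': 'x'},
       'spark': {'spark.executor.cores': '2'}}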
Example #26
def AddRotationPeriodFlag(parser):
    parser.add_argument(
        '--rotation-period',
        type=arg_parsers.Duration(lower_bound='1d'),
        help=('Automatic rotation period of the key. See '
              '$ gcloud topic datetimes for information on duration formats.'))
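arg_parsers.Duration() returns a callable that converts strings such as '1d'
into a number of seconds and enforces the lower bound. A simplified stand-in
that mimics just enough of that behavior for plain argparse (the real parser
accepts richer syntax, per gcloud topic datetimes):

import argparse
import re

# Simplified stand-in for arg_parsers.Duration(lower_bound='1d').
_UNITS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}

def duration(value, lower_bound_seconds=86400):
    match = re.fullmatch(r'(\d+)([smhd]?)', value)
    if not match:
        raise argparse.ArgumentTypeError('bad duration: %r' % value)
    seconds = int(match.group(1)) * _UNITS[match.group(2) or 's']
    if seconds < lower_bound_seconds:
        raise argparse.ArgumentTypeError('duration must be at least 1d')
    return seconds

parser = argparse.ArgumentParser()
parser.add_argument('--rotation-period', type=duration)
print(parser.parse_args(['--rotation-period', '2d']).rotation_period)  # 172800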
Example #27
def AddAutoscalerArgs(parser):
    """Adds commandline arguments to parser."""
    parser.add_argument(
        '--scale-based-on-load-balancing',
        action='store_true',
        help=('Sets autoscaling based on load balancing '
              'utilization.'),
    )
    parser.add_argument(
        '--scale-based-on-cpu',
        action='store_true',
        help='Sets autoscaling based on CPU utilization.',
    )
    parser.add_argument(
        '--target',
        help='The managed instance group to scale, '
        'either the fully-qualified URL or the managed instance '
        'group name.',
        required=True,
    )
    parser.add_argument(
        '--cool-down-period',
        type=arg_parsers.Duration(),
        help='The number of seconds to wait after a virtual '
        'machine has been started before the autoscaler starts '
        'collecting information from it. This accounts '
        'for the amount of time it may take for a virtual '
        'machine to initialize, during which the collected usage '
        'information is not reliable for autoscaling. It is '
        'recommended that you set this to at least the amount of '
        'time it takes for your virtual machine and applications '
        'to start.',
    )
    parser.add_argument(
        '--description',
        help='An optional description for this '
        'autoscaler.',
    )
    parser.add_argument(
        '--min-num-replicas',
        type=int,
        help='Sets the minimum number of instances the '
        'autoscaler will maintain. The autoscaler will never '
        'scale the number of instances below this number. If not '
        'provided, the default is 2.',
    )
    parser.add_argument(
        '--max-num-replicas',
        type=int,
        help='Sets the maximum number of instances the '
        'autoscaler will maintain for the managed instance '
        'group.',
        required=True,
    )
    parser.add_argument(
        '--target-cpu-utilization',
        type=float,
        help='The CPU utilization the autoscaler will aim to '
        'maintain. Must be a float between 0.0 and 1.0, '
        'exclusive.',
    )
    parser.add_argument(
        '--custom-metric',
        type=str,
        help='Sets a Google Cloud '
        'Monitoring instance metric to scale based on (see '
        'https://developers.google.com/cloud-monitoring/metrics'
        ').',
    )
    parser.add_argument(
        '--target-custom-metric-utilization',
        type=float,
        help='The custom metric level the autoscaler will aim to '
        'maintain. Must be a float greater than 0.0.',
    )
    parser.add_argument(
        '--custom-metric-utilization-target-type',
        type=str,
        help='The type of your custom metric. Choose from '
        'the following: {0}.'.format(
            ', '.join(ALLOWED_UTILIZATION_TARGET_TYPES)),
    )
    parser.add_argument(
        '--target-load-balancer-utilization',
        type=float,
        help='The HTTP load balancer utilization level the '
        'autoscaler will maintain. This must be a float greater '
        'than 0.0.',
    )
    custom_metric_utilization = parser.add_argument(
        '--custom-metric-utilization',
        type=arg_parsers.ArgDict(spec={
            'metric': str,
            'utilization-target': float,
            'utilization-target-type': str,
        }),
        # pylint:disable=protected-access
        action=arg_parsers.FloatingListValuesCatcher(argparse._AppendAction),
        help=(
            'Adds target value of a Google Cloud Monitoring metric Autoscaler '
            'will aim to maintain.'),
        metavar='PROPERTY=VALUE',
    )
    custom_metric_utilization.detailed_help = """
  Adds target value of a Google Cloud Monitoring metric Autoscaler will aim to
  maintain.

  *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

  *utilization-target*::: Value of the metric Autoscaler will aim to maintain
  on average (greater than 0.0).

  *utilization-target-type*::: How target is expressed. You can choose from the
  following: {0}.
  """.format(', '.join(ALLOWED_UTILIZATION_TARGET_TYPES))
Example #28
def ParseRetentionDurationWithDefault(value):
    if value == subscriptions.DEFAULT_MESSAGE_RETENTION_VALUE:
        return value
    return util.FormatDuration(arg_parsers.Duration()(value))
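The helper leaves the sentinel default untouched and normalizes every other
value through Duration(). A self-contained sketch of the same
passthrough-default pattern, with stand-ins for the Pub/Sub constants and
formatter (DEFAULT_SENTINEL and format_seconds here are hypothetical):

# Stand-ins for subscriptions.DEFAULT_MESSAGE_RETENTION_VALUE and
# util.FormatDuration; the real values live in the Pub/Sub surface code.
DEFAULT_SENTINEL = 'default'

def format_seconds(seconds):
    return '%ds' % seconds

def parse_retention(value):
    if value == DEFAULT_SENTINEL:
        return value  # keep the sentinel as-is
    return format_seconds(int(value.rstrip('s')))  # simplified Duration()

assert parse_retention('default') == 'default'
assert parse_retention('600s') == '600s'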
Example #29
  def Args(parser):
    parser.add_argument(
        'name',
        metavar='NAME',
        help='The name of the image to set deprecation status of.')

    state = parser.add_argument(
        '--state',
        choices=DEPRECATION_STATUSES,
        type=lambda x: x.upper(),
        required=True,
        help='The deprecation state to set on the image.')
    state.detailed_help = """\
       The deprecation state to set on the image.
       An image's default state is ``ACTIVE'', suggesting that the image is
       currently supported. Operations which create a new
       resource using a ``DEPRECATED'' image
       return successfully, but with a warning indicating that the image
       is deprecated and recommending its replacement. New uses of ``OBSOLETE'' or
       ``DELETED'' images result in an error. Note that setting the
       deprecation state to ``DELETED'' will not automatically delete the
       image. You must still make a request to delete the image to remove it
       from the image list.
       """

    replacement = parser.add_argument(
        '--replacement',
        help='Specifies a Compute Engine image as a replacement.')
    replacement.detailed_help = """\
       Specifies a Compute Engine image as a replacement for the image
       being phased out. Users of the deprecated image will be advised to switch
       to this replacement. For example, ``--replacement example-image'' or
       ``--replacement projects/google/global/images/example-image''. This
       flag is required when setting the image state to anything other than
       ``ACTIVE'' or when --delete-in, --delete-on, --obsolete-in, or
       --obsolete-on is provided.
       """

    delete_group = parser.add_mutually_exclusive_group()

    delete_on = delete_group.add_argument(
        '--delete-on',
        help=('Specifies the date and time when the state of this image '
              'will become DELETED.'))
    delete_on.detailed_help = """\
       Similar to --delete-in, but specifies an absolute time when the status
       should be set to DELETED. The date and time
       specified must be a valid RFC 3339 full-date or date-time.
       For times in UTC, this looks like ``YYYY-MM-DDTHH:MM:SSZ''. For example:
       2020-01-02T00:00:00Z for midnight on January 2, 2020 in UTC.
       This flag is mutually exclusive with --delete-in.
       """

    delete_in = delete_group.add_argument(
        '--delete-in',
        help=('Specifies the amount of time until this image should become '
              'DELETED.'),
        type=arg_parsers.Duration())
    delete_in.detailed_help = """\
       Specifies the amount of time until the image's status should be set
       to DELETED. For instance, specifying ``30d'' will set the status to
       DELETED in 30 days from the current system time. Valid units for this
       flag are ``s'' for seconds, ``m'' for minutes, ``h'' for hours and
       ``d'' for days. If no unit is specified, seconds is assumed.

       Note that the image will not be deleted automatically. The image will
       only be marked as deleted. An explicit request to delete the image must
       be made in order to remove it from the image list.
       This flag is mutually exclusive with --delete-on.
       """

    obsolete_group = parser.add_mutually_exclusive_group()

    obsolete_on = obsolete_group.add_argument(
        '--obsolete-on',
        help=('Specifies the date and time when the state of this image '
              'will become OBSOLETE.'))
    obsolete_on.detailed_help = """\
       Specifies time (in the same format as --delete-on) when this image's
       status should become OBSOLETE.
       This flag is mutually exclusive with --obsolete-in.
       """

    obsolete_in = obsolete_group.add_argument(
        '--obsolete-in',
        help=('Specifies the amount of time until this image should become '
              'OBSOLETE.'),
        type=arg_parsers.Duration())
    obsolete_in.detailed_help = """\
       Specifies the amount of time until the image's status should be set
       to OBSOLETE. Valid units for this flag are the same as for --delete-in.
       This flag is mutually exclusive with --obsolete-on.
       """
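The relative --delete-in / --obsolete-in values are resolved against the
current time to produce the absolute timestamps that --delete-on /
--obsolete-on take directly. A quick standard-library sketch of that
arithmetic, using the ``30d'' example from the help text above:

import datetime

# '30d' parsed by Duration() yields 30 * 86400 seconds; resolving it against
# the current UTC time gives an RFC 3339 timestamp like --delete-on expects.
delete_in_seconds = 30 * 86400
delete_on = (datetime.datetime.utcnow()
             + datetime.timedelta(seconds=delete_in_seconds))
print(delete_on.strftime('%Y-%m-%dT%H:%M:%SZ'))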
Example #30
def AddUpdateArgs(parser, include_beta=False, include_alpha=False):
    """Add args to the parser for subnet update.

  Args:
    parser: The argparse parser.
    include_beta: Include beta functionality.
    include_alpha: Include alpha functionality.
  """
    updated_field = parser.add_mutually_exclusive_group()

    updated_field.add_argument(
        '--enable-private-ip-google-access',
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public IP address.'))

    updated_field.add_argument('--add-secondary-ranges',
                               type=arg_parsers.ArgDict(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Adds secondary IP ranges to the subnetwork for use in IP aliasing.

      For example, `--add-secondary-ranges range1=192.168.64.0/24` adds
      a secondary range 192.168.64.0/24 with name range1.

      * `RANGE_NAME` - Name of the secondary range.
      * `RANGE` - IP range in CIDR format.
      """)

    updated_field.add_argument('--remove-secondary-ranges',
                               type=arg_parsers.ArgList(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Removes secondary ranges from the subnetwork.

      For example, `--remove-secondary-ranges range2,range3` removes the
      secondary ranges with names range2 and range3.
      """)

    updated_field.add_argument(
        '--enable-flow-logs',
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable VPC flow logging for this subnet. More information '
            'about VPC flow logs can be found at '
            'https://cloud.google.com/vpc/docs/using-flow-logs.'))

    if include_beta:
        messages = apis.GetMessagesModule('compute',
                                          compute_api.COMPUTE_BETA_API_VERSION)

        AddLoggingAggregationInterval(parser, messages)

        parser.add_argument('--logging-flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)

        AddLoggingMetadata(parser, messages)

    if include_alpha:
        messages = apis.GetMessagesModule(
            'compute', compute_api.COMPUTE_ALPHA_API_VERSION)
        updated_field.add_argument(
            '--role',
            choices={'ACTIVE': 'The ACTIVE subnet that is currently used.'},
            type=lambda x: x.replace('-', '_').upper(),
            help=(
                'The role is set to ACTIVE to update a BACKUP reserved '
                'address range to be the new ACTIVE address range. Note '
                'that the only supported value for this flag is ACTIVE, since '
                'setting an address range to BACKUP is not supported.'
                '\n\nThis field is only valid when updating a reserved IP '
                'address range used for the purpose of Internal HTTP(S) Load '
                'Balancer.'))

        parser.add_argument('--drain-timeout',
                            type=arg_parsers.Duration(lower_bound='0s'),
                            default='0s',
                            help="""\
        The time period for draining traffic from Internal HTTP(S) Load Balancer
        proxies that are assigned addresses in the current ACTIVE subnetwork.
        For example, ``1h'', ``60m'' and ``3600s'' each specify a duration of
        1 hour for draining the traffic. Longer times reduce the number of
        proxies that are draining traffic at any one time, and so improve
        the availability of proxies for load balancing. The drain timeout is
        only applicable when the [--role=ACTIVE] flag is being used.
        """)

        AddLoggingAggregationIntervalAlpha(parser, messages)

        parser.add_argument('--flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)

        AddLoggingMetadataAlpha(parser, messages)

        updated_field.add_argument(
            '--enable-private-ipv6-access',
            action=arg_parsers.StoreTrueFalseAction,
            help=('Enable/disable private IPv6 access for the subnet.'))

        GetPrivateIpv6GoogleAccessTypeFlagMapper(
            messages).choice_arg.AddToParser(updated_field)
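Several flags above use arg_parsers.StoreTrueFalseAction to generate paired
--flag/--no-flag forms whose attribute stays unset when neither is passed.
Python's built-in argparse.BooleanOptionalAction (3.9+) gives comparable
tri-state behavior, sketched here:

import argparse

# BooleanOptionalAction accepts --enable-flow-logs and --no-enable-flow-logs,
# and leaves the attribute None when neither form is given.
parser = argparse.ArgumentParser()
parser.add_argument('--enable-flow-logs',
                    action=argparse.BooleanOptionalAction,
                    default=None)

print(parser.parse_args([]).enable_flow_logs)                         # None
print(parser.parse_args(['--enable-flow-logs']).enable_flow_logs)     # True
print(parser.parse_args(['--no-enable-flow-logs']).enable_flow_logs)  # False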