def AddBaseArgs(parser):
    """Declare flag and positional arguments for this command parser.

    Registers the positional Cloud SQL instance ID plus the optional
    instance-settings flags (activation policy, backups, machine size,
    networking, maintenance window, storage, etc.) on `parser`.

    Args:
      parser: An argparse.ArgumentParser-like object to add the flags to.
    """
    # TODO(b/35705305): move common flags to command_lib.sql.flags
    base.ASYNC_FLAG.AddToParser(parser)
    parser.display_info.AddFormat(flags.INSTANCES_FORMAT_BETA)
    parser.add_argument(
        '--activation-policy',
        required=False,
        choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
        default=None,
        help=('The activation policy for this instance. This specifies when '
              'the instance should be activated and is applicable only when '
              'the instance state is RUNNABLE. More information on activation '
              'policies can be found here: '
              'https://cloud.google.com/sql/faq#activation_policy'))
    parser.add_argument(
        '--assign-ip',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Specified if the instance must be assigned an IP address.')
    parser.add_argument(
        '--authorized-gae-apps',
        type=arg_parsers.ArgList(min_length=1),
        metavar='APP',
        required=False,
        default=[],
        help=('First Generation instances only. List of IDs for App Engine '
              'applications running in the Standard environment that can '
              'access this instance.'))
    parser.add_argument(
        '--authorized-networks',
        type=arg_parsers.ArgList(min_length=1),
        metavar='NETWORK',
        required=False,
        default=[],
        help=('The list of external networks that are allowed to connect to '
              'the instance. Specified in CIDR notation, also known as '
              '\'slash\' notation (e.g. 192.168.100.0/24).'))
    # Daily backups are enabled by default (default=True).
    parser.add_argument('--backup',
                        required=False,
                        action='store_true',
                        default=True,
                        help='Enables daily backup.')
    parser.add_argument(
        '--backup-start-time',
        required=False,
        help=('The start time of daily backups, specified in the 24 hour '
              'format - HH:MM, in the UTC timezone.'))
    parser.add_argument(
        '--cpu',
        type=int,
        required=False,
        help=('A whole number value indicating how many cores are desired in '
              'the machine. Both --cpu and --memory must be specified if a '
              'custom machine type is desired, and the --tier flag must be '
              'omitted.'))
    parser.add_argument(
        '--database-flags',
        type=arg_parsers.ArgDict(min_length=1),
        metavar='FLAG=VALUE',
        required=False,
        help=(
            'A comma-separated list of database flags to set on the '
            'instance. Use an equals sign to separate flag name and value. '
            'Flags without values, like skip_grant_tables, can be written '
            'out without a value after, e.g., `skip_grant_tables=`. Use '
            'on/off for booleans. View the Instance Resource API for allowed '
            'flags. (e.g., `--database-flags max_allowed_packet=55555,'
            'skip_grant_tables=,log_output=1`)'))
    parser.add_argument(
        '--database-version',
        required=False,
        default='MYSQL_5_6',
        choices=['MYSQL_5_5', 'MYSQL_5_6', 'MYSQL_5_7', 'POSTGRES_9_6'],
        help='The database engine type and version.')
    parser.add_argument(
        '--enable-bin-log',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help=(
            'Specified if binary log should be enabled. If backup '
            'configuration is disabled, binary log must be disabled as well.'))
    parser.add_argument(
        '--failover-replica-name',
        required=False,
        help='Also create a failover replica with the specified name.')
    parser.add_argument(
        '--follow-gae-app',
        required=False,
        help=('First Generation instances only. The App Engine app this '
              'instance should follow. It must be in the same region as '
              'the instance.'))
    parser.add_argument(
        '--gce-zone',
        required=False,
        help=('The preferred Compute Engine zone (e.g. us-central1-a, '
              'us-central1-b, etc.).'))
    # Positional argument: the instance ID, validated by the project's
    # instance-name regexp validator.
    parser.add_argument('instance',
                        type=command_validate.InstanceNameRegexpValidator(),
                        help='Cloud SQL instance ID.')
    # Dict-valued choices: each key maps to its own per-choice help text.
    # Input is lower-cased (type=str.lower) before being matched.
    parser.add_argument(
        '--maintenance-release-channel',
        choices={
            'production':
            'Production updates are stable and recommended '
            'for applications in production.',
            'preview':
            'Preview updates release prior to production '
            'updates. You may wish to use the preview channel '
            'for dev/test applications so that you can preview '
            'their compatibility with your application prior '
            'to the production release.'
        },
        type=str.lower,
        help="Which channel's updates to apply during the maintenance window.")
    parser.add_argument(
        '--maintenance-window-day',
        choices=arg_parsers.DayOfWeek.DAYS,
        type=arg_parsers.DayOfWeek.Parse,
        help='Day of week for maintenance window, in UTC time zone.')
    parser.add_argument(
        '--maintenance-window-hour',
        type=arg_parsers.BoundedInt(lower_bound=0, upper_bound=23),
        help='Hour of day for maintenance window, in UTC time zone.')
    parser.add_argument(
        '--master-instance-name',
        required=False,
        help=('Name of the instance which will act as master in the '
              'replication setup. The newly created instance will be a read '
              'replica of the specified master instance.'))
    parser.add_argument(
        '--memory',
        type=arg_parsers.BinarySize(),
        required=False,
        help=('A whole number value indicating how much memory is desired in '
              'the machine. A size unit should be provided (eg. 3072MiB or '
              '9GiB) - if no units are specified, GiB is assumed. Both --cpu '
              'and --memory must be specified if a custom machine type is '
              'desired, and the --tier flag must be omitted.'))
    # Hidden from help output via argparse.SUPPRESS.
    parser.add_argument('--on-premises-host-port',
                        required=False,
                        help=argparse.SUPPRESS)
    parser.add_argument(
        '--pricing-plan',
        '-p',
        required=False,
        choices=['PER_USE', 'PACKAGE'],
        default='PER_USE',
        help=('First Generation instances only. The pricing plan for this '
              'instance.'))
    # TODO(b/31989340): add remote completion
    parser.add_argument(
        '--region',
        required=False,
        default='us-central',
        help=(
            'The regional location (e.g. asia-east1, us-east1). See the full '
            'list of regions at '
            'https://cloud.google.com/sql/docs/instance-locations.'))
    parser.add_argument('--replica-type',
                        choices=['READ', 'FAILOVER'],
                        help='The type of replica to create.')
    parser.add_argument('--replication',
                        required=False,
                        choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
                        default=None,
                        help='The type of replication this instance uses.')
    parser.add_argument(
        '--require-ssl',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Specified if users connecting over IP must use SSL.')
    parser.add_argument(
        '--storage-auto-increase',
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help=('Storage size can be increased, but it cannot be decreased; '
              'storage increases are permanent for the life of the instance. '
              'With this setting enabled, a spike in storage requirements '
              'can result in permanently increased storage costs for your '
              'instance. However, if an instance runs out of available space, '
              'it can result in the instance going offline, dropping existing '
              'connections.'))
    parser.add_argument(
        '--storage-size',
        type=arg_parsers.BinarySize(lower_bound='10GB',
                                    upper_bound='10230GB',
                                    suggested_binary_size_scales=['GB']),
        help=(
            'Amount of storage allocated to the instance. Must be an integer '
            'number of GB between 10GB and 10230GB inclusive.'))
    parser.add_argument('--storage-type',
                        required=False,
                        choices=['SSD', 'HDD'],
                        default=None,
                        help='The storage type for the instance.')
    parser.add_argument(
        '--tier',
        '-t',
        required=False,
        help=('The tier for this instance. For Second Generation instances, '
              'TIER is the instance\'s machine type (e.g., db-n1-standard-1). '
              'For PostgreSQL instances, only shared-core machine types '
              '(e.g., db-f1-micro) apply. A complete list of tiers is '
              'available here: https://cloud.google.com/sql/pricing.'))
示例#2
0
def _Args(parser):
    """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
  """
    parser.add_argument('name', help='The name of this cluster.')
    # Operation timeout; the flag is hidden from help output.
    parser.add_argument('--timeout',
                        type=int,
                        default=1800,
                        hidden=True,
                        help='Timeout (seconds) to wait for the cluster '
                        'operation to complete.')
    flags.AddAsyncFlag(parser)
    parser.add_argument(
        '--num-nodes',
        type=arg_parsers.BoundedInt(1),
        help=
        'The number of nodes to be created in each of the cluster\'s zones.',
        default=3)
    parser.add_argument(
        '--machine-type',
        '-m',
        help='The type of machine to use for nodes. Defaults to n1-standard-1.'
    )
    parser.add_argument('--subnetwork',
                        help="""\
The Google Compute Engine subnetwork
(https://cloud.google.com/compute/docs/subnetworks) to which the cluster is
connected. The subnetwork must belong to the network specified by --network.

Cannot be used with the "--create-subnetwork" option.
""")
    parser.add_argument(
        '--network',
        help='The Compute Engine Network that the cluster will connect to. '
        'Google Kubernetes Engine will use this network when creating routes '
        'and firewalls for the clusters. Defaults to the \'default\' network.')
    parser.add_argument(
        '--cluster-ipv4-cidr',
        help='The IP address range for the pods in this cluster in CIDR '
        'notation (e.g. 10.0.0.0/14).  Prior to Kubernetes version 1.7.0 '
        'this must be a subset of 10.0.0.0/8; however, starting with version '
        '1.7.0 can be any RFC 1918 IP range.')
    parser.add_argument('--enable-cloud-logging',
                        action='store_true',
                        default=True,
                        help='Automatically send logs from the cluster to the '
                        'Google Cloud Logging API.')
    # NOTE(review): default=True is already set on the flag above, so this
    # set_defaults call looks redundant — confirm before removing.
    parser.set_defaults(enable_cloud_logging=True)
    parser.add_argument(
        '--enable-cloud-monitoring',
        action='store_true',
        default=True,
        help='Automatically send metrics from pods in the cluster to the '
        'Google Cloud Monitoring API. VM metrics will be collected by Google '
        'Compute Engine regardless of this setting.')
    # NOTE(review): likewise redundant with default=True above.
    parser.set_defaults(enable_cloud_monitoring=True)
    parser.add_argument(
        '--disk-size',
        type=int,
        help='Size in GB for node VM boot disks. Defaults to 100GB.')
    flags.AddBasicAuthFlags(parser)
    parser.add_argument(
        '--max-nodes-per-pool',
        type=arg_parsers.BoundedInt(100, api_adapter.MAX_NODES_PER_POOL),
        help='The maximum number of nodes to allocate per default initial node '
        'pool. Kubernetes Engine will automatically create enough nodes pools '
        'such that each node pool contains less than '
        '--max-nodes-per-pool nodes. Defaults to {nodes} nodes, but can be set '
        'as low as 100 nodes per pool on initial create.'.format(
            nodes=api_adapter.MAX_NODES_PER_POOL))
    flags.AddImageTypeFlag(parser, 'cluster')
    flags.AddImageFlag(parser, hidden=True)
    flags.AddImageProjectFlag(parser, hidden=True)
    flags.AddImageFamilyFlag(parser, hidden=True)
    flags.AddNodeLabelsFlag(parser)
    flags.AddTagsFlag(
        parser, """\
Applies the given Compute Engine tags (comma separated) on all nodes in the new
node-pool. Example:

  $ {command} example-cluster --tags=tag1,tag2

New nodes, including ones created by resize or recreate, will have these tags
on the Compute Engine API instance object and can be used in firewall rules.
See https://cloud.google.com/sdk/gcloud/reference/compute/firewall-rules/create
for examples.
""")
    flags.AddClusterVersionFlag(parser)
    # TODO(b/36071127): unhide this flag after we have enough ssd.
    flags.AddDiskTypeFlag(parser, suppressed=True)
    flags.AddEnableAutoUpgradeFlag(parser)
    parser.display_info.AddFormat(util.CLUSTERS_FORMAT)
    flags.AddNodeVersionFlag(parser)
    flags.AddIssueClientCertificateFlag(parser)
示例#3
0
def AddMaintenanceWindowHour(parser):
  """Adds the --maintenance-window-hour flag to the given parser."""
  # Valid hours are 0-23 inclusive.
  hour_of_day = arg_parsers.BoundedInt(lower_bound=0, upper_bound=23)
  parser.add_argument(
      '--maintenance-window-hour',
      type=hour_of_day,
      help='Hour of day for maintenance window, in UTC time zone.')
示例#4
0
def ArgsForClusterRef(parser, beta=False, include_deprecated=True):
    # pylint: disable=unused-argument
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    flags.AddZoneFlag(parser, short_flags=include_deprecated)

    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    image_parser = parser.add_mutually_exclusive_group()
    # TODO(b/73291743): Add external doc link to --image
    image_parser.add_argument(
        '--image',
        metavar='IMAGE',
        help='The full custom image URI or the custom image name that '
        'will be used to create a cluster.')
    image_parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      [format="csv",options="header"]
      |========
      Number of Masters,Cluster Mode
      1,Standard
      3,High Availability
      |========
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,File,Purpose of file
capacity-scheduler,capacity-scheduler.xml,Hadoop YARN Capacity Scheduler configuration
core,core-site.xml,Hadoop general configuration
distcp,distcp-default.xml,Hadoop Distributed Copy configuration
hadoop-env,hadoop-env.sh,Hadoop specific environment variables
hdfs,hdfs-site.xml,Hadoop HDFS configuration
hive,hive-site.xml,Hive configuration
mapred,mapred-site.xml,Hadoop MapReduce configuration
mapred-env,mapred-env.sh,Hadoop MapReduce specific environment variables
pig,pig.properties,Pig configuration
spark,spark-defaults.conf,Spark configuration
spark-env,spark-env.sh,Spark specific environment variables
yarn,yarn-site.xml,Hadoop YARN configuration
yarn-env,yarn-env.sh,Hadoop YARN specific environment variables
|========

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP,
           scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))

    if include_deprecated:
        _AddDiskArgsDeprecated(parser)
    else:
        _AddDiskArgs(parser)

    # --no-address is an exception to the no negative-flag style guideline to be
    # consistent with gcloud compute instances create --no-address
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

    # Shared help text for the three boot-disk-type flags below.
    boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
    parser.add_argument('--master-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
示例#5
0
    complete.""")

# Filter expression applied to each resource item before it is listed; run
# `$ gcloud topic filters` for the expression syntax.
FILTER_FLAG = Argument('--filter',
                       metavar='EXPRESSION',
                       require_coverage_in_tests=False,
                       category=LIST_COMMAND_FLAGS,
                       help="""\
    Apply a Boolean filter _EXPRESSION_ to each resource item to be listed.
    If the expression evaluates `True`, then that item is listed. For more
    details and examples of filter expressions, run $ gcloud topic filters. This
    flag interacts with other flags that are applied in this order: *--flatten*,
    *--sort-by*, *--filter*, *--limit*.""")

# Maximum number of resources to list; unlimited by default
# (BoundedInt with unlimited=True).
LIMIT_FLAG = Argument('--limit',
                      type=arg_parsers.BoundedInt(1,
                                                  sys.maxsize,
                                                  unlimited=True),
                      require_coverage_in_tests=False,
                      category=LIST_COMMAND_FLAGS,
                      help="""\
    Maximum number of resources to list. The default is *unlimited*.
    This flag interacts with other flags that are applied in this order:
    *--flatten*, *--sort-by*, *--filter*, *--limit*.
    """)

PAGE_SIZE_FLAG = Argument('--page-size',
                          type=arg_parsers.BoundedInt(1,
                                                      sys.maxsize,
                                                      unlimited=True),
                          require_coverage_in_tests=False,
                          category=LIST_COMMAND_FLAGS,
def AddAutoscalerArgs(
    parser, queue_scaling_enabled=False, autoscaling_file_enabled=False,
    stackdriver_metrics_flags=False):
  """Adds commandline arguments to parser.

  Args:
    parser: The argparse parser to add the autoscaler flags to.
    queue_scaling_enabled: bool, whether to add the queue-based scaling flags.
    autoscaling_file_enabled: bool, whether to add --autoscaling-file; also
        makes --max-num-replicas optional.
    stackdriver_metrics_flags: bool, whether to add the Stackdriver
        metric-based autoscaling flags.
  """
  parser.add_argument(
      '--cool-down-period',
      type=arg_parsers.Duration(),
      help=('The time period that the autoscaler should wait before it starts '
            'collecting information from a new instance. This prevents the '
            'autoscaler from collecting information when the instance is '
            'initializing, during which the collected usage would not be '
            'reliable. The default is 60 seconds.'))
  parser.add_argument('--description', help='Notes about Autoscaler.')
  # sys.maxsize (not the Python 2-only sys.maxint) keeps these flags working
  # on Python 3 and consistent with the other BoundedInt flags in this file.
  parser.add_argument('--min-num-replicas',
                      type=arg_parsers.BoundedInt(0, sys.maxsize),
                      help='Minimum number of replicas Autoscaler will set.')
  parser.add_argument('--max-num-replicas',
                      type=arg_parsers.BoundedInt(0, sys.maxsize),
                      required=not autoscaling_file_enabled,
                      help='Maximum number of replicas Autoscaler will set.')
  parser.add_argument('--scale-based-on-cpu',
                      action='store_true',
                      help='Autoscaler will be based on CPU utilization.')
  parser.add_argument('--scale-based-on-load-balancing',
                      action='store_true',
                      help=('Use autoscaling based on load balancing '
                            'utilization.'))
  parser.add_argument('--target-cpu-utilization',
                      type=arg_parsers.BoundedFloat(0.0, 1.0),
                      help='Autoscaler will aim to maintain CPU utilization at '
                      'target level (0.0 to 1.0).')
  parser.add_argument('--target-load-balancing-utilization',
                      type=arg_parsers.BoundedFloat(0.0, None),
                      help='Autoscaler will aim to maintain the load balancing '
                      'utilization level (greater than 0.0).')
  custom_metric_utilization_help = """\
      Adds a target metric value for the Autoscaler to use.

      *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

      *utilization-target*::: Value of the metric Autoscaler will aim to
      maintain (greater than 0.0).

      *utilization-target-type*::: How target is expressed. Valid values: {0}.
      """.format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))
  if stackdriver_metrics_flags:
    custom_metric_utilization_help += (
        '\nMutually exclusive with `--update-stackdriver-metric`.')
  parser.add_argument(
      '--custom-metric-utilization',
      type=arg_parsers.ArgDict(
          spec={
              'metric': str,
              'utilization-target': float,
              'utilization-target-type': str,
          },
      ),
      action='append',
      help=custom_metric_utilization_help,
  )

  if queue_scaling_enabled:
    parser.add_argument(
        '--queue-scaling-cloud-pub-sub',
        type=arg_parsers.ArgDict(
            spec={
                'topic': str,
                'subscription': str,
            },
        ),
        help="""\
        Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
        Both topic and subscription are required.

        *topic*::: Topic specification. Can be just a name or a partial URL
        (starting with "projects/..."). Topic must belong to the same project as
        Autoscaler.

        *subscription*::: Subscription specification. Can be just a name or a
        partial URL (starting with "projects/..."). Subscription must belong to
        the same project as Autoscaler and must be connected to the specified
        topic.
        """
    )
    parser.add_argument('--queue-scaling-acceptable-backlog-per-instance',
                        type=arg_parsers.BoundedFloat(0.0, None),
                        help='Queue-based scaling target: autoscaler will aim '
                        'to assure that average number of tasks in the queue '
                        'is no greater than this value.')
    parser.add_argument('--queue-scaling-single-worker-throughput',
                        type=arg_parsers.BoundedFloat(0.0, None),
                        help='Hint the autoscaler for queue-based scaling on '
                        'how much throughput a single worker instance is able '
                        'to consume.')
  if autoscaling_file_enabled:
    parser.add_argument(
        '--autoscaling-file',
        metavar='PATH',
        help=('Path of the file from which autoscaling configuration will be '
              'loaded. This flag allows you to atomically setup complex '
              'autoscalers.'))
  if stackdriver_metrics_flags:
    parser.add_argument(
        '--remove-stackdriver-metric',
        metavar='METRIC',
        help=('Stackdriver metric to remove from autoscaling configuration. '
              'If the metric is the only input used for autoscaling the '
              'command will fail.'))
    parser.add_argument(
        '--update-stackdriver-metric',
        metavar='METRIC',
        help=('Stackdriver metric to use as an input for autoscaling. '
              'When using this flag you must also specify target value of the '
              'metric by specifying '
              '`--stackdriver-metric-single-instance-assignment` or '
              '`--stackdriver-metric-utilization-target` and '
              '`--stackdriver-metric-utilization-target-type`. '
              'Mutually exclusive with `--custom-metric-utilization`.'))
    parser.add_argument(
        '--stackdriver-metric-filter',
        metavar='FILTER',
        help=('Expression for filtering samples used to autoscale, see '
              'https://cloud.google.com/monitoring/api/v3/filters.'))
    parser.add_argument(
        '--stackdriver-metric-utilization-target',
        metavar='TARGET',
        type=float,
        help=('Value of the metric Autoscaler will aim to maintain. When '
              'specifying this flag you must also provide '
              '`--stackdriver-metric-utilization-target-type`. Mutually '
              'exclusive with '
              '`--stackdriver-metric-single-instance-assignment` and '
              '`--custom-metric-utilization`.'))

    parser.add_argument(
        '--stackdriver-metric-utilization-target-type',
        metavar='TARGET_TYPE',
        choices=_ALLOWED_UTILIZATION_TARGET_TYPES_LOWER,
        help=('Value of the metric Autoscaler will aim to maintain. When '
              'specifying this flag you must also provide '
              '`--stackdriver-metric-utilization-target`. Mutually '
              'exclusive with '
              '`--stackdriver-metric-single-instance-assignment` and '
              '`--custom-metric-utilization`.'))
    # Help previously listed `-stackdriver-metric-utilization-target-type`
    # twice (with a single dash); corrected to name both target flags.
    parser.add_argument(
        '--stackdriver-metric-single-instance-assignment',
        metavar='ASSIGNMENT',
        type=float,
        help=('Autoscaler will aim to maintain value of metric divided by '
              'number of instances at this level. Mutually '
              'exclusive with '
              '`--stackdriver-metric-utilization-target`, '
              '`--stackdriver-metric-utilization-target-type`, and '
              '`--custom-metric-utilization`.'))
示例#7
0
 def Args(parser):
     """Declare flag and positional arguments for this command parser.

     Registers the settings that can be changed on a Cloud SQL instance,
     pairing several of them with `--clear-*` counterparts in mutually
     exclusive groups, plus the positional instance ID.

     Args:
       parser: argparse.ArgumentParser, the parser to add arguments to.
     """
     # TODO(b/35705305): move common flags to command_lib.sql.flags
     parser.add_argument(
         '--activation-policy',
         required=False,
         choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
         help=(
             'The activation policy for this instance. This specifies when '
             'the instance should be activated and is applicable only when '
             'the instance state is RUNNABLE.'))
     parser.add_argument(
         '--assign-ip',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help='The instance must be assigned an IP address.')
     # NOTE(review): 'async' is a reserved keyword in Python 3.7+, so this
     # flag's value cannot be read as `args.async`; presumably it is read
     # via getattr or a renamed dest elsewhere -- confirm before changing.
     parser.add_argument('--async',
                         action='store_true',
                         help='Do not wait for the operation to complete.')
     # Setting and clearing the authorized GAE app list are mutually
     # exclusive operations.
     gae_apps_group = parser.add_mutually_exclusive_group()
     gae_apps_group.add_argument(
         '--authorized-gae-apps',
         type=arg_parsers.ArgList(min_length=1),
         metavar='APP',
         required=False,
         help=(
             'First Generation instances only. List of IDs for App Engine '
             'applications running in the Standard environment that '
             'can access this instance.'))
     gae_apps_group.add_argument(
         '--clear-gae-apps',
         required=False,
         action='store_true',
         help=
         ('Specified to clear the list of App Engine apps that can access '
          'this instance.'))
     # Setting and clearing the authorized networks are mutually exclusive.
     networks_group = parser.add_mutually_exclusive_group()
     networks_group.add_argument(
         '--authorized-networks',
         type=arg_parsers.ArgList(min_length=1),
         metavar='NETWORK',
         required=False,
         help=(
             'The list of external networks that are allowed to connect to '
             'the instance. Specified in CIDR notation, also known as '
             '\'slash\' notation (e.g. 192.168.100.0/24).'))
     networks_group.add_argument(
         '--clear-authorized-networks',
         required=False,
         action='store_true',
         help=
         ('Clear the list of external networks that are allowed to connect '
          'to the instance.'))
     # Configuring a backup start time and disabling backups are mutually
     # exclusive.
     backups_group = parser.add_mutually_exclusive_group()
     backups_group.add_argument(
         '--backup-start-time',
         required=False,
         help=('The start time of daily backups, specified in the 24 hour '
               'format - HH:MM, in the UTC timezone.'))
     backups_group.add_argument(
         '--no-backup',
         required=False,
         action='store_true',
         help='Specified if daily backup should be disabled.')
     # Setting and clearing database flags are mutually exclusive.
     database_flags_group = parser.add_mutually_exclusive_group()
     database_flags_group.add_argument(
         '--database-flags',
         type=arg_parsers.ArgDict(min_length=1),
         metavar='FLAG=VALUE',
         required=False,
         help=
         ('A comma-separated list of database flags to set on the '
          'instance. Use an equals sign to separate flag name and value. '
          'Flags without values, like skip_grant_tables, can be written '
          'out without a value after, e.g., `skip_grant_tables=`. Use '
          'on/off for booleans. View the Instance Resource API for allowed '
          'flags. (e.g., `--database-flags max_allowed_packet=55555,'
          'skip_grant_tables=,log_output=1`)'))
     database_flags_group.add_argument(
         '--clear-database-flags',
         required=False,
         action='store_true',
         help=('Clear the database flags set on the instance. '
               'WARNING: Instance will be restarted.'))
     parser.add_argument(
         '--cpu',
         type=int,
         required=False,
         help='A whole number value indicating how many cores are desired in '
         'the machine. Both --cpu and --memory must be specified if a custom '
         'machine type is desired, and the --tier flag must be omitted.')
     parser.add_argument(
         '--diff',
         action='store_true',
         help='Show what changed as a result of the update.')
     parser.add_argument(
         '--enable-bin-log',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=
         ('Enable binary log. If backup configuration is disabled, binary '
          'log should be disabled as well.'))
     parser.add_argument(
         '--enable-database-replication',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=(
             'Enable database replication. Applicable only for read replica '
             'instance(s). WARNING: Instance will be restarted.'))
     parser.add_argument(
         '--follow-gae-app',
         required=False,
         help=(
             'First Generation instances only. The App Engine app '
             'this instance should follow. It must be in the same region as '
             'the instance. WARNING: Instance may be restarted.'))
     parser.add_argument(
         '--gce-zone',
         required=False,
         help=('The preferred Compute Engine zone (e.g. us-central1-a, '
               'us-central1-b, etc.). WARNING: Instance may be restarted.'))
     # Positional: the instance to patch.
     # NOTE(review): completion_resource is not a standard argparse kwarg --
     # presumably a calliope parser extension; confirm before reuse.
     parser.add_argument('instance',
                         completion_resource='sql.instances',
                         help='Cloud SQL instance ID.')
     parser.add_argument(
         '--maintenance-release-channel',
         choices={
             'production':
             'Production updates are stable and recommended '
             'for applications in production.',
             'preview':
             'Preview updates release prior to production '
             'updates. You may wish to use the preview channel '
             'for dev/test applications so that you can preview '
             'their compatibility with your application prior '
             'to the production release.'
         },
         type=str.lower,  # Accept user input in any case.
         help=
         "Which channel's updates to apply during the maintenance window.")
     parser.add_argument(
         '--maintenance-window-any',
         action='store_true',
         help='Removes the user-specified maintenance window.')
     parser.add_argument(
         '--maintenance-window-day',
         choices=arg_parsers.DayOfWeek.DAYS,
         type=arg_parsers.DayOfWeek.Parse,
         help='Day of week for maintenance window, in UTC time zone.')
     parser.add_argument(
         '--maintenance-window-hour',
         type=arg_parsers.BoundedInt(lower_bound=0, upper_bound=23),
         help='Hour of day for maintenance window, in UTC time zone.')
     parser.add_argument(
         '--pricing-plan',
         '-p',
         required=False,
         choices=['PER_USE', 'PACKAGE'],
         help=('First Generation instances only. The pricing plan for this '
               'instance.'))
     parser.add_argument(
         '--memory',
         type=arg_parsers.BinarySize(),
         required=False,
         help='A whole number value indicating how much memory is desired in '
         'the machine. A size unit should be provided (eg. 3072MiB or 9GiB) - '
         'if no units are specified, GiB is assumed. Both --cpu and --memory '
         'must be specified if a custom machine type is desired, and the --tier '
         'flag must be omitted.')
     parser.add_argument('--replication',
                         required=False,
                         choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
                         help='The type of replication this instance uses.')
     parser.add_argument(
         '--require-ssl',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=
         ('mysqld should default to \'REQUIRE X509\' for users connecting '
          'over IP.'))
     parser.add_argument(
         '--storage-auto-increase',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help='Storage size can be increased, but it cannot be '
         'decreased; storage increases are permanent for the life of the '
         'instance. With this setting enabled, a spike in storage requirements '
         'can result in permanently increased storage costs for your instance. '
         'However, if an instance runs out of available space, it can result in '
         'the instance going offline, dropping existing connections.')
     parser.add_argument(
         '--storage-size',
         type=arg_parsers.BinarySize(lower_bound='10GB',
                                     upper_bound='10230GB',
                                     suggested_binary_size_scales=['GB']),
         help=
         'Amount of storage allocated to the instance. Must be an integer '
         'number of GB between 10GB and 10230GB inclusive.')
     parser.add_argument(
         '--tier',
         '-t',
         required=False,
         help=
         ('The tier for this instance. For Second Generation instances, '
          'TIER is the instance\'s machine type (e.g., db-n1-standard-1). '
          'For PostgreSQL instances, only shared-core machine types '
          '(e.g., db-f1-micro) apply. A complete list of tiers is '
          'available here: https://cloud.google.com/sql/pricing. WARNING: '
          'Instance will be restarted.'))
# Example 8
  def Args(parser):
    """Registers the search query, paging, ordering, and scope flags.

    Args:
      parser: argparse.ArgumentParser, the parser to add arguments to.
    """
    parser.add_argument(
        'query',
        help="""\
            Query string in search query syntax in Cloud Data Catalog. For more
            information, see:
            https://cloud.google.com/data-catalog/docs/how-to/search-reference
        """)

    # NOTE(review): require_coverage_in_tests and category are not standard
    # argparse kwargs -- presumably calliope parser extensions; confirm.
    parser.add_argument(
        '--limit',
        type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
            Maximum number of resources to list. The default is *unlimited*.
        """)

    parser.add_argument(
        '--page-size',
        type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
            Some services group resource list output into pages. This flag specifies
            the maximum number of resources per page.
        """)

    # Fixed: the example previously showed `desc`, contradicting the
    # "ascending" sentence that introduces it.
    parser.add_argument(
        '--order-by',
        require_coverage_in_tests=False,
        category=base.LIST_COMMAND_FLAGS,
        help="""\
            Specifies the ordering of results. Defaults to 'relevance'.

            Currently supported case-sensitive choices are:

                *  relevance
                *  last_access_timestamp [asc|desc]: defaults to descending.
                *  last_modified_timestamp [asc|desc]: defaults to descending.

            To order by last modified timestamp ascending, specify:
            `--order-by="last_modified_timestamp asc"`.
        """)

    # At least one scope flag must be supplied (required=True on the group).
    scope_group = parser.add_argument_group(
        'Scope. Control the scope of the search.',
        required=True)
    scope_group.add_argument(
        '--include-gcp-public-datasets',
        action='store_true',
        help="""\
            If True, include Google Cloud Platform public datasets in the search
            results.
        """)
    scope_group.add_argument(
        '--include-project-ids',
        type=arg_parsers.ArgList(),
        metavar='PROJECT',
        help="""\
            List of Cloud Project IDs to include in the search.
        """)
    scope_group.add_argument(
        '--include-organization-ids',
        type=arg_parsers.ArgList(),
        metavar='ORGANIZATION',
        help="""\
            List of Cloud Organization IDs to include in the search.
        """)
# Example 9
 def _Flags(parser):
     """Registers the --limit flag for listing invalidations.

     Args:
       parser: argparse.ArgumentParser, the parser to add the flag to.
     """
     parser.add_argument(
         '--limit',
         # sys.maxint was removed in Python 3; sys.maxsize is the portable
         # equivalent and keeps the flag effectively unbounded (other
         # examples in this file already use sys.maxsize).
         type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
         help='The maximum number of invalidations to list.')
# Example 10
    def Args(parser):
        """Registers the positional URL map argument and the --limit flag.

        Args:
          parser: argparse.ArgumentParser, the parser to add arguments to.
        """
        parser.add_argument('urlmap', help='The name of the URL map.')

        # sys.maxint was removed in Python 3; sys.maxsize is the portable
        # upper bound for an effectively unlimited result count.
        parser.add_argument('--limit',
                            type=arg_parsers.BoundedInt(1, sys.maxsize),
                            help='The maximum number of results.')
# Example 11
def AddBgpPeerArgs(parser,
                   for_add_bgp_peer=False,
                   support_bfd=False,
                   support_enable=False,
                   is_update=False):
  """Adds common arguments for managing BGP peers.

  Args:
    parser: The argparse parser to add the arguments to.
    for_add_bgp_peer: bool, True when registering flags for an add-bgp-peer
      command (makes --interface and --peer-asn required and omits
      --ip-address, which is inferred from the interface).
    support_bfd: bool, True to also register the hidden BFD flags.
    support_enable: bool, True to also register the hidden --enabled flag.
    is_update: bool, True when registering flags for an update command.
  """

  # Only used to phrase help text ('added' vs. 'updated').
  operation = 'updated'
  if for_add_bgp_peer:
    operation = 'added'

  parser.add_argument(
      '--peer-name',
      required=True,
      help='The name of the new BGP peer being {0}.'.format(operation))

  parser.add_argument(
      '--interface',
      required=for_add_bgp_peer,
      help='The name of the interface for this BGP peer.')

  # Fixed: the example previously said `--asn=64512`, but this flag is
  # named --peer-asn.
  parser.add_argument(
      '--peer-asn',
      required=for_add_bgp_peer,
      type=int,
      help='The BGP autonomous system number (ASN) for this BGP peer. '
      'Must be a 16-bit or 32-bit private ASN as defined in '
      'https://tools.ietf.org/html/rfc6996, for example `--peer-asn=64512`.')

  # For add_bgp_peer, we only require the interface and infer the IP instead.
  if not for_add_bgp_peer:
    parser.add_argument(
        '--ip-address',
        type=utils.IPV4Argument,
        help='The link-local address of the Cloud Router interface for this '
        'BGP peer. Must be a link-local IPv4 address belonging to the range '
        '169.254.0.0/16 and must belong to same subnet as the interface '
        'address of the peer router.')

  parser.add_argument(
      '--peer-ip-address',
      type=utils.IPV4Argument,
      help='The link-local address of the peer router. Must be a link-local '
      'IPv4 address belonging to the range 169.254.0.0/16.')

  parser.add_argument(
      '--advertised-route-priority',
      type=arg_parsers.BoundedInt(lower_bound=0, upper_bound=65535),
      help='The priority of routes advertised to this BGP peer. In the case '
      'where there is more than one matching route of maximum length, '
      'the routes with lowest priority value win. 0 <= priority <= '
      '65535. If not specified, will use Google-managed priorities.')

  if support_bfd:
    bfd_group_help = (
        'Arguments to {0} BFD (Bidirectional Forwarding Detection) '
        'settings:'.format('update' if is_update else 'configure'))
    bfd_group = parser.add_group(help=bfd_group_help,
                                 hidden=True)
    bfd_group.add_argument(
        '--bfd-session-initialization-mode',
        choices=_BFD_SESSION_INITIALIZATION_MODE_CHOICES,
        type=lambda mode: mode.upper(),
        metavar='BFD_SESSION_INITIALIZATION_MODE',
        hidden=True,
        help='The BFD session initialization mode for this BGP peer. Must be one '
        'of:\n\n'
        'ACTIVE - The Cloud Router will initiate the BFD session for this BGP '
        'peer.\n\n'
        'PASSIVE - The Cloud Router will wait for the peer router to initiate '
        'the BFD session for this BGP peer.\n\n'
        'DISABLED - BFD is disabled for this BGP peer.')

    bfd_group.add_argument(
        '--bfd-min-transmit-interval',
        type=arg_parsers.Duration(
            default_unit='ms',
            lower_bound='100ms',
            upper_bound='30000ms',
            parsed_unit='ms'),
        hidden=True,
        help='The minimum transmit interval between BFD packets. If BFD echo '
        'mode is enabled on both this router and the peer router this sets the '
        'minimum transmit interval of BFD echo packets. Otherwise, this sets '
        'the minimum transmit interval of BFD control packets. The default is '
        '300 milliseconds. See $ gcloud topic datetimes for information on '
        'duration formats.')
    bfd_group.add_argument(
        '--bfd-min-receive-interval',
        type=arg_parsers.Duration(
            default_unit='ms',
            lower_bound='100ms',
            upper_bound='30000ms',
            parsed_unit='ms'),
        hidden=True,
        help='The minimum receive interval between BFD packets. If BFD echo '
        'mode is enabled on both this router and the peer router this sets the '
        'minimum receive interval of BFD echo packets. Otherwise, this sets '
        'the minimum receive interval of BFD control packets. The default is '
        '300 milliseconds. See $ gcloud topic datetimes for information on '
        'duration formats.')
    bfd_group.add_argument(
        '--bfd-multiplier',
        type=int,
        hidden=True,
        help='The number of consecutive BFD control packets that must be '
        'missed before BFD declares that a peer is unavailable.')
    bfd_group.add_argument(
        '--bfd-packet-mode',
        choices=_BFD_PACKET_MODE_CHOICES,
        type=lambda mode: mode.upper(),
        metavar='BFD_PACKET_MODE',
        hidden=True,
        help='The BGP packet mode for this BGP peer. Must be one of:\n\n'
        'CONTROL_AND_ECHO - BFD echo mode is enabled for this BGP peer. If the '
        'peer router also has BFD echo mode enabled, BFD echo packets will be '
        'sent to the other router. If the peer router does not have BFD echo '
        'mode enabled, only control packets will be sent.\n\n'
        'CONTROL_ONLY - BFD echo mode is disabled for this BGP peer. If this '
        'router and the peer router have a multi-hop connection, '
        'BFD_PACKET_MODE should be set to CONTROL_ONLY as BFD echo mode is '
        'only supported on single hop connections.')
    bfd_group.add_argument(
        '--bfd-slow-timer-interval',
        type=arg_parsers.Duration(
            default_unit='ms',
            lower_bound='1000ms',
            upper_bound='30000ms',
            parsed_unit='ms'),
        hidden=True,
        help='The transmit and receive interval between BFD control packets in '
        'milliseconds if echo mode is enabled on both this router and the peer '
        'router. The default is 5000 milliseconds. See $ gcloud topic '
        'datetimes for information on duration formats.')
  # Fixed: this help text was previously assigned only inside the
  # `if support_bfd:` branch, so calling with support_enable=True and
  # support_bfd=False raised a NameError below.
  enabled_display_help = (
      'If enabled, the peer connection can be established with routing '
      'information. If disabled, any active session with the peer is '
      'terminated and all associated routing information is removed.')
  if support_enable:
    if not is_update:
      enabled_display_help += ' Enabled by default.'
    parser.add_argument(
        '--enabled',
        hidden=True,
        action=arg_parsers.StoreTrueFalseAction,
        help=enabled_display_help)
# Example 12
 def Args(parser):
   """Register flags for this command."""
   parser.add_argument(
       '--limit', default=None,
       help='If greater than zero, the maximum number of results.',
       # sys.maxint was removed in Python 3; sys.maxsize is the portable
       # upper bound for an effectively unlimited result count.
       type=arg_parsers.BoundedInt(1, sys.maxsize))
# Example 13
def AddUploadModelFlags(parser):
    """Adds flags for UploadModel.

    Registers the region resource argument, the model metadata flags,
    the serving-container flags, and the explanation-related flags.

    Args:
      parser: The argparse parser to add the flags to.
    """
    AddRegionResourceArg(parser, 'to upload model')
    base.Argument('--display-name',
                  required=True,
                  help=('Display name of the model.')).AddToParser(parser)
    base.Argument('--description',
                  required=False,
                  help=('Description of the model.')).AddToParser(parser)
    base.Argument('--container-image-uri',
                  required=True,
                  help=("""\
URI of the Model serving container file in the Container Registry
(e.g. gcr.io/myproject/server:latest).
""")).AddToParser(parser)
    base.Argument('--artifact-uri',
                  help=("""\
Path to the directory containing the Model artifact and any of its
supporting files.
""")).AddToParser(parser)
    # Serving container runtime configuration (env vars, entrypoint, args,
    # ports, and HTTP routes).
    parser.add_argument(
        '--container-env-vars',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(),
        action=arg_parsers.UpdateAction,
        help='List of key-value pairs to set as environment variables.')
    parser.add_argument('--container-command',
                        type=arg_parsers.ArgList(),
                        metavar='COMMAND',
                        action=arg_parsers.UpdateAction,
                        help="""\
Entrypoint for the container image. If not specified, the container
image's default entrypoint is run.
""")
    parser.add_argument('--container-args',
                        metavar='ARG',
                        type=arg_parsers.ArgList(),
                        action=arg_parsers.UpdateAction,
                        help="""\
Comma-separated arguments passed to the command run by the container
image. If not specified and no `--command` is provided, the container
image's default command is used.
""")
    parser.add_argument('--container-ports',
                        metavar='PORT',
                        type=arg_parsers.ArgList(
                            element_type=arg_parsers.BoundedInt(1, 65535)),
                        action=arg_parsers.UpdateAction,
                        help="""\
Container ports to receive requests at. Must be a number between 1 and 65535,
inclusive.
""")
    parser.add_argument(
        '--container-predict-route',
        help='HTTP path to send prediction requests to inside the container.')
    parser.add_argument(
        '--container-health-route',
        help='HTTP path to send health checks to inside the container.')
    # For Explanation: the flags below configure how model explanations are
    # computed.
    parser.add_argument(
        '--explanation-method',
        help=
        'Method used for explanation. Accepted values are `integrated-gradients`, `xrai` and `sampled-shapley`.'
    )
    parser.add_argument(
        '--explanation-metadata-file',
        help=
        'Path to a local JSON file that contains the metadata describing the Model\'s input and output for explanation.'
    )
    parser.add_argument(
        '--explanation-step-count',
        type=int,
        help='Number of steps to approximate the path integral for explanation.'
    )
    parser.add_argument(
        '--explanation-path-count',
        type=int,
        help=
        'Number of feature permutations to consider when approximating the Shapley values for explanation.'
    )
    parser.add_argument(
        '--smooth-grad-noisy-sample-count',
        type=int,
        help=
        'Number of gradient samples used for approximation at explanation. Only applicable to explanation method `integrated-gradients` or `xrai`.'
    )
    parser.add_argument(
        '--smooth-grad-noise-sigma',
        type=float,
        help=
        'Single float value used to add noise to all the features for explanation. Only applicable to explanation method `integrated-gradients` or `xrai`.'
    )
    parser.add_argument(
        '--smooth-grad-noise-sigma-by-feature',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(),
        action=arg_parsers.UpdateAction,
        help=
        'Noise sigma by features for explanation. Noise sigma represents the standard deviation of the gaussian kernel that will be used to add noise to interpolated inputs prior to computing gradients. Only applicable to explanation method `integrated-gradients` or `xrai`.'
    )
# Example 14
def AddPredictionResourcesArgs(parser, version):
    """Add arguments for prediction resources.

    Registers replica-count, autoscaling, machine-type, and accelerator
    flags on the given parser.

    Args:
      parser: The argparse parser to add the arguments to.
      version: The API version, used to look up the valid accelerator
        types via GetAcceleratorTypeMapper.
    """
    base.Argument('--min-replica-count',
                  type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
                  help=("""\
Minimum number of machine replicas the deployed model will be always deployed
on. If specified, the value must be equal to or larger than 1.

If not specified and the uploaded models use dedicated resources, the default
value is 1.
""")).AddToParser(parser)

    base.Argument(
        '--max-replica-count',
        type=int,
        help=('Maximum number of machine replicas the deployed model will be '
              'always deployed on.')).AddToParser(parser)

    # The valid metric names are interpolated into the help text from the
    # constants module.
    base.Argument(
        '--autoscaling-metric-specs',
        metavar='METRIC-NAME=TARGET',
        type=arg_parsers.ArgDict(key_type=str, value_type=int),
        action=arg_parsers.UpdateAction,
        help="""\
Metric specifications that overrides a resource utilization metric's target
value. At most one entry is allowed per metric.

*METRIC-NAME*::: Resource metric name. Choices are {}.

*TARGET*::: Target resource utilization in percentage (1% - 100%) for the
given metric. If the value is set to 60, the target resource utilization is 60%.

For example:
`--autoscaling-metric-specs=cpu-usage=70`
""".format(', '.join([
            "'{}'".format(c)
            for c in sorted(constants.OP_AUTOSCALING_METRIC_NAME_MAPPER.keys())
        ]))).AddToParser(parser)

    base.Argument('--machine-type',
                  help="""\
The machine resources to be used for each node of this deployment.
For available machine types, see
https://cloud.google.com/ai-platform-unified/docs/predictions/machine-types.
""").AddToParser(parser)

    # The valid accelerator types depend on the API version; they are
    # interpolated into the help text below.
    base.Argument('--accelerator',
                  type=arg_parsers.ArgDict(spec={
                      'type': str,
                      'count': int,
                  },
                                           required_keys=['type']),
                  help="""\
Manage the accelerator config for GPU serving. When deploying a model with
Compute Engine Machine Types, a GPU accelerator may also
be selected.

*type*::: The type of the accelerator. Choices are {}.

*count*::: The number of accelerators to attach to each machine running the job.
 This is usually 1. If not specified, the default value is 1.

For example:
`--accelerator=type=nvidia-tesla-k80,count=1`""".format(', '.join([
                      "'{}'".format(c)
                      for c in GetAcceleratorTypeMapper(version).choices
                  ]))).AddToParser(parser)
# Example 15
def AddSubscriptionSettingsFlags(parser,
                                 is_update=False,
                                 support_message_ordering=False,
                                 support_filtering=False):
    """Adds the flags for creating or updating a subscription.

  Args:
    parser: The argparse parser.
    is_update: Whether or not this is for the update operation (vs. create).
    support_message_ordering: Whether or not flags for ordering should be added.
    support_filtering: Whether or not flags for filtering should be added.
  """
    AddAckDeadlineFlag(parser)
    AddPushConfigFlags(parser)
    AddMessageRetentionFlags(parser, is_update)
    if support_message_ordering and not is_update:
        # Fixed help-text typo: "will by sent" -> "will be sent".
        parser.add_argument(
            '--enable-message-ordering',
            action='store_true',
            default=None,
            help=
            """Whether or not to receive messages with the same ordering key in
            order. If true, messages with the same ordering key will be sent to
            subscribers in the order in which they were received by Cloud
            Pub/Sub.""")
    if support_filtering and not is_update:
        parser.add_argument(
            '--message-filter',
            type=str,
            help=
            """Expression to filter messages. If set, Pub/Sub only delivers the
        messages that match the filter. The expression must be a non-empty
        string in the Pub/Sub filtering language.""")
    # On update, setting and clearing the dead letter policy are mutually
    # exclusive; on create, only setting is possible.
    current_group = parser
    if is_update:
        mutual_exclusive_group = current_group.add_mutually_exclusive_group()
        mutual_exclusive_group.add_argument(
            '--clear-dead-letter-policy',
            action='store_true',
            default=None,
            help=
            """If set, clear the dead letter policy from the subscription.""")
        current_group = mutual_exclusive_group

    set_dead_letter_policy_group = current_group.add_argument_group(
        help="""Dead Letter Queue Options. The Cloud Pub/Sub service account
           associated with the enclosing subscription's parent project (i.e.,
           service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
           must have permission to Publish() to this topic and Acknowledge()
           messages on this subscription.""")
    dead_letter_topic = resource_args.CreateTopicResourceArg(
        'to publish dead letter messages to.',
        flag_name='dead-letter-topic',
        positional=False,
        required=False)
    resource_args.AddResourceArgs(set_dead_letter_policy_group,
                                  [dead_letter_topic])
    set_dead_letter_policy_group.add_argument(
        '--max-delivery-attempts',
        type=arg_parsers.BoundedInt(5, 100),
        default=None,
        help="""Maximum number of delivery attempts for any message. The value
          must be between 5 and 100. Defaults to 5. `--dead-letter-topic`
          must also be specified.""")
    parser.add_argument(
        '--expiration-period',
        type=ParseExpirationPeriodWithNeverSentinel,
        help="""The subscription will expire if it is inactive for the given
          period. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed. This flag additionally accepts the special value "never" to
          indicate that the subscription will never expire.""")

    # Same pattern for the retry policy: clearing is only offered on update,
    # mutually exclusive with setting it.
    current_group = parser
    if is_update:
        mutual_exclusive_group = current_group.add_mutually_exclusive_group()
        mutual_exclusive_group.add_argument(
            '--clear-retry-policy',
            action='store_true',
            default=None,
            help="""If set, clear the retry policy from the subscription.""")
        current_group = mutual_exclusive_group

    set_retry_policy_group = current_group.add_argument_group(
        help="""Retry Policy Options. Retry policy specifies how Cloud Pub/Sub
              retries message delivery for this subscription.""")

    set_retry_policy_group.add_argument(
        '--min-retry-delay',
        type=arg_parsers.Duration(lower_bound='0s', upper_bound='600s'),
        help="""The minimum delay between consecutive deliveries of a given
          message. Value should be between 0 and 600 seconds. Defaults to 10
          seconds. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed.""")
    set_retry_policy_group.add_argument(
        '--max-retry-delay',
        type=arg_parsers.Duration(lower_bound='0s', upper_bound='600s'),
        help="""The maximum delay between consecutive deliveries of a given
          message. Value should be between 0 and 600 seconds. Defaults to 10
          seconds. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed.""")
# Example 16
def AddCycleFrequencyArgs(parser,
                          flag_suffix,
                          start_time_help,
                          cadence_help,
                          supports_hourly=False,
                          has_restricted_start_times=False,
                          supports_weekly=False):
    """Add Cycle Frequency args for Resource Policies."""
    # One mutually exclusive, required top-level group holds every way of
    # expressing the cycle frequency.
    cycle_group = parser.add_argument_group('Cycle Frequency Group.',
                                            required=True,
                                            mutex=True)
    if has_restricted_start_times:
        # Append the restricted-choices note to the caller-supplied help text.
        start_time_help += """\
        Valid choices are 00:00, 04:00, 08:00,12:00,
        16:00 and 20:00 UTC. For example, `--start-time="03:00-05"`
        (which gets converted to 08:00 UTC)."""
    if supports_weekly:
        flags_subgroup = cycle_group.add_group('From flags:')
    else:
        flags_subgroup = cycle_group.add_group('')
    flags_subgroup.add_argument('--start-time',
                                required=True,
                                type=arg_parsers.Datetime.Parse,
                                help=start_time_help)
    # Exactly one cadence (daily/hourly/weekly) must accompany --start-time.
    cadence_subgroup = flags_subgroup.add_group(mutex=True, required=True)
    cadence_subgroup.add_argument(
        '--daily-{}'.format(flag_suffix),
        dest='daily_cycle',
        action='store_true',
        help='{} starts daily at START_TIME.'.format(cadence_help))

    if supports_hourly:
        cadence_subgroup.add_argument(
            '--hourly-{}'.format(flag_suffix),
            metavar='HOURS',
            dest='hourly_cycle',
            type=arg_parsers.BoundedInt(lower_bound=1),
            help='{} occurs every n hours starting at START_TIME.'.format(
                cadence_help))

    if supports_weekly:
        weekdays = [
            'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
            'saturday', 'sunday'
        ]
        base.ChoiceArgument(
            '--weekly-{}'.format(flag_suffix),
            dest='weekly_cycle',
            choices=weekdays,
            help_str='{} occurs weekly on WEEKLY_{} at START_TIME.'.format(
                cadence_help, flag_suffix.upper())).AddToParser(cadence_subgroup)
        # Weekly schedules may alternatively come from a file, which is
        # mutually exclusive with the flag-based specification above.
        file_subgroup = cycle_group.add_group('From file:')
        file_subgroup.add_argument(
            '--weekly-{}-from-file'.format(flag_suffix),
            dest='weekly_cycle_from_file',
            type=arg_parsers.BufferedFileInput(),
            help="""\
        A JSON/YAML file which specifies a weekly schedule. It should be a
        list of objects with the following fields:

        day: Day of the week with the same choices as `--weekly-{}`.
        startTime: Start time of the snapshot schedule with the same format
            as --start-time.
        """.format(flag_suffix))
示例#17
0
def _Args(parser):
    """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order to
      capture some information, but behaves like an ArgumentParser.
  """
    parser.add_argument('name',
                        help="""\
The name of the cluster to create.

The name may contain only lowercase alphanumerics and '-', must start with a
letter and end with an alphanumeric, and must be no longer than 40
characters.
""")
    # Timeout in seconds for operation
    parser.add_argument(
        '--timeout',
        type=int,
        default=3600,
        hidden=True,
        help='Timeout (seconds) for waiting on the operation to complete.')
    flags.AddAsyncFlag(parser)
    parser.add_argument(
        '--num-nodes',
        type=arg_parsers.BoundedInt(1),
        help=
        'The number of nodes to be created in each of the cluster\'s zones.',
        default=3)
    flags.AddMachineTypeFlag(parser)
    parser.add_argument('--subnetwork',
                        help="""\
The Google Compute Engine subnetwork
(https://cloud.google.com/compute/docs/subnetworks) to which the cluster is
connected. The subnetwork must belong to the network specified by --network.

Cannot be used with the "--create-subnetwork" option.
""")
    parser.add_argument(
        '--network',
        help='The Compute Engine Network that the cluster will connect to. '
        'Google Kubernetes Engine will use this network when creating routes '
        'and firewalls for the clusters. Defaults to the \'default\' network.')
    parser.add_argument(
        '--cluster-ipv4-cidr',
        help='The IP address range for the pods in this cluster in CIDR '
        'notation (e.g. 10.0.0.0/14).  Prior to Kubernetes version 1.7.0 '
        'this must be a subset of 10.0.0.0/8; however, starting with version '
        '1.7.0 can be any RFC 1918 IP range.')
    # Legacy Stackdriver flags are kept for backward compatibility;
    # DeprecationAction emits a warning steering users to
    # --enable-stackdriver-kubernetes.
    parser.add_argument(
        '--enable-cloud-logging',
        action=actions.DeprecationAction(
            '--enable-cloud-logging',
            warn=
            'From 1.14, legacy Stackdriver GKE logging is deprecated. Thus, '
            'flag `--enable-cloud-logging` is also deprecated. Please use '
            '`--enable-stackdriver-kubernetes` instead, to migrate to new '
            'Stackdriver Kubernetes Engine monitoring and logging. For more '
            'details, please read: '
            'https://cloud.google.com/monitoring/kubernetes-engine/migration.',
            action='store_true'),
        help='Automatically send logs from the cluster to the Google Cloud '
        'Logging API. This flag is deprecated, use '
        '`--enable-stackdriver-kubernetes` instead.')
    parser.add_argument(
        '--enable-cloud-monitoring',
        action=actions.DeprecationAction(
            '--enable-cloud-monitoring',
            warn='From 1.14, legacy Stackdriver GKE monitoring is deprecated. '
            'Thus, flag `--enable-cloud-monitoring` is also deprecated. Please '
            'use `--enable-stackdriver-kubernetes` instead, to migrate to new '
            'Stackdriver Kubernetes Engine monitoring and logging. For more '
            'details, please read: '
            'https://cloud.google.com/monitoring/kubernetes-engine/migration.',
            action='store_true'),
        help='Automatically send metrics from pods in the cluster to the Google '
        'Cloud Monitoring API. VM metrics will be collected by Google Compute '
        'Engine regardless of this setting. This flag is deprecated, use '
        '`--enable-stackdriver-kubernetes` instead.')
    parser.add_argument('--disk-size',
                        type=arg_parsers.BinarySize(lower_bound='10GB'),
                        help='Size for node VM boot disks. Defaults to 100GB.')
    flags.AddBasicAuthFlags(parser)
    # Fixed help-text typo: "enough nodes pools" -> "enough node pools".
    parser.add_argument(
        '--max-nodes-per-pool',
        type=arg_parsers.BoundedInt(100, api_adapter.MAX_NODES_PER_POOL),
        help='The maximum number of nodes to allocate per default initial node '
        'pool. Kubernetes Engine will automatically create enough node pools '
        'such that each node pool contains less than '
        '--max-nodes-per-pool nodes. Defaults to {nodes} nodes, but can be set '
        'as low as 100 nodes per pool on initial create.'.format(
            nodes=api_adapter.MAX_NODES_PER_POOL))
    flags.AddImageTypeFlag(parser, 'cluster')
    flags.AddImageFlag(parser, hidden=True)
    flags.AddImageProjectFlag(parser, hidden=True)
    flags.AddImageFamilyFlag(parser, hidden=True)
    flags.AddNodeLabelsFlag(parser)
    flags.AddTagsFlag(
        parser, """\
Applies the given Compute Engine tags (comma separated) on all nodes in the new
node-pool. Example:

  $ {command} example-cluster --tags=tag1,tag2

New nodes, including ones created by resize or recreate, will have these tags
on the Compute Engine API instance object and can be used in firewall rules.
See https://cloud.google.com/sdk/gcloud/reference/compute/firewall-rules/create
for examples.
""")
    parser.display_info.AddFormat(util.CLUSTERS_FORMAT)
    flags.AddIssueClientCertificateFlag(parser)
    flags.AddAcceleratorArgs(parser)
    flags.AddDiskTypeFlag(parser)
    flags.AddMetadataFlags(parser)
    flags.AddDatabaseEncryptionFlag(parser)
    flags.AddShieldedInstanceFlags(parser)
    flags.AddEnableShieldedNodesFlags(parser)
示例#18
0
def AddAutoscalerArgs(parser, queue_scaling_enabled=False):
    """Adds commandline arguments to parser.

    Args:
      parser: An argparse-like parser to register autoscaler flags on.
      queue_scaling_enabled: If True, also register the queue-based
        (Cloud Pub/Sub) scaling flags.
    """
    parser.add_argument(
        '--cool-down-period',
        type=arg_parsers.Duration(),
        help=(
            'The time period that the autoscaler should wait before it starts '
            'collecting information from a new instance. This prevents the '
            'autoscaler from collecting information when the instance is '
            'initializing, during which the collected usage would not be '
            'reliable. The default is 60 seconds.'))
    parser.add_argument('--description', help='Notes about Autoscaler.')
    # sys.maxsize replaces the Python-2-only sys.maxint (AttributeError on
    # Python 3); on CPython 2.6+ the two values are identical.
    parser.add_argument('--min-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        help='Minimum number of replicas Autoscaler will set.')
    parser.add_argument('--max-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        required=True,
                        help='Maximum number of replicas Autoscaler will set.')
    parser.add_argument('--scale-based-on-cpu',
                        action='store_true',
                        help='Autoscaler will be based on CPU utilization.')
    parser.add_argument('--scale-based-on-load-balancing',
                        action='store_true',
                        help=('Use autoscaling based on load balancing '
                              'utilization.'))
    parser.add_argument(
        '--target-cpu-utilization',
        type=arg_parsers.BoundedFloat(0.0, 1.0),
        help='Autoscaler will aim to maintain CPU utilization at '
        'target level (0.0 to 1.0).')
    parser.add_argument(
        '--target-load-balancing-utilization',
        type=arg_parsers.BoundedFloat(0.0, None),
        help='Autoscaler will aim to maintain the load balancing '
        'utilization level (greater than 0.0).')
    # Fixed garbled help text: "for the to the Autoscaler".
    parser.add_argument('--custom-metric-utilization',
                        type=arg_parsers.ArgDict(spec={
                            'metric':
                            str,
                            'utilization-target':
                            float,
                            'utilization-target-type':
                            str,
                        }, ),
                        action='append',
                        help="""\
      Adds a target metric value for the Autoscaler.

      *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

      *utilization-target*::: Value of the metric Autoscaler will aim to
      maintain (greater than 0.0).

      *utilization-target-type*::: How target is expressed. Valid values: {0}.
      """.format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES)))

    if queue_scaling_enabled:
        parser.add_argument('--queue-scaling-cloud-pub-sub',
                            type=arg_parsers.ArgDict(spec={
                                'topic': str,
                                'subscription': str,
                            }, ),
                            help="""\
        Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
        Both topic and subscription are required.

        *topic*::: Topic specification. Can be just a name or a partial URL
        (starting with "projects/..."). Topic must belong to the same project as
        Autoscaler.

        *subscription*::: Subscription specification. Can be just a name or a
        partial URL (starting with "projects/..."). Subscription must belong to
        the same project as Autoscaler and must be connected to the specified
        topic.
        """)
        parser.add_argument(
            '--queue-scaling-acceptable-backlog-per-instance',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Queue-based scaling target: autoscaler will aim '
            'to assure that average number of tasks in the queue '
            'is no greater than this value.',
        )
        parser.add_argument(
            '--queue-scaling-single-worker-throughput',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Hint the autoscaler for queue-based scaling on '
            'how much throughput a single worker instance is able '
            'to consume.')
示例#19
0
    arg_value: the argument's value parsed from yaml file.

  Returns:
    The validated argument value.

  Raises:
    InvalidArgException: If the arg value is missing or is not valid.
  """
  if arg_value is None:
    raise InvalidArgException(arg_internal_name, 'no argument value found.')
  if arg_internal_name in _ARG_VALIDATORS:
    return _ARG_VALIDATORS[arg_internal_name](arg_internal_name, arg_value)
  return _ValidateString(arg_internal_name, arg_value)


# Shared parsers/constants for the YAML arg validators in this module.
# sys.maxsize replaces the Python-2-only sys.maxint (AttributeError on
# Python 3); on CPython 2.6+ the two values are identical.
POSITIVE_INT_PARSER = arg_parsers.BoundedInt(1, sys.maxsize)
NONNEGATIVE_INT_PARSER = arg_parsers.BoundedInt(0, sys.maxsize)
TIMEOUT_PARSER = arg_parsers.Duration(lower_bound='1m', upper_bound='6h')
ORIENTATION_LIST = ['portrait', 'landscape']


def ValidateStringList(arg_internal_name, arg_value):
  """Validate an arg whose value should be a list of strings.

  Args:
    arg_internal_name: the internal form of the arg name.
    arg_value: the argument's value parsed from yaml file.

  Returns:
    The validated argument value.
示例#20
0
def ArgsForClusterRef(parser, beta=False):
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    # --image is only surfaced (and made exclusive with --image-version) in
    # the beta track; in GA it stays hidden.
    if beta:
        image_parser = parser.add_mutually_exclusive_group()
        # TODO(b/73291743): Add external doc link to --image
        image_parser.add_argument(
            '--image',
            metavar='IMAGE',
            help='The full custom image URI or the custom image name that '
            'will be used to create a cluster.')
        image_parser.add_argument(
            '--image-version',
            metavar='VERSION',
            help='The image version to use for the cluster. Defaults to the '
            'latest version.')
    else:
        parser.add_argument(
            '--image',
            hidden=True,
            help='The full image URI to use with the cluster. Overrides '
            '--image-version.')
        parser.add_argument(
            '--image-version',
            metavar='VERSION',
            help='The image version to use for the cluster. Defaults to the '
            'latest version.')

    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    # Fixed help-text typo below: "mutally" -> "mutually".
    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      [format="csv",options="header"]
      |========
      Number of Masters,Cluster Mode
      1,Standard
      3,High Availability
      |========
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    # Fixed help-text typo below: "can specified" -> "can be specified".
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes necessary for the cluster to function properly are always
added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP,
           scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))

    master_boot_disk_size = parser.add_mutually_exclusive_group()
    worker_boot_disk_size = parser.add_mutually_exclusive_group()

    # Deprecated, to be removed at a future date.
    master_boot_disk_size.add_argument(
        '--master-boot-disk-size-gb',
        action=actions.DeprecationAction(
            '--master-boot-disk-size-gb',
            warn=(
                'The `--master-boot-disk-size-gb` flag is deprecated. '
                'Use `--master-boot-disk-size` flag with "GB" after value.')),
        type=int,
        hidden=True,
        help='Use `--master-boot-disk-size` flag with "GB" after value.')
    worker_boot_disk_size.add_argument(
        '--worker-boot-disk-size-gb',
        action=actions.DeprecationAction(
            '--worker-boot-disk-size-gb',
            warn=(
                'The `--worker-boot-disk-size-gb` flag is deprecated. '
                'Use `--worker-boot-disk-size` flag with "GB" after value.')),
        type=int,
        hidden=True,
        help='Use `--worker-boot-disk-size` flag with "GB" after value.')

    boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
    master_boot_disk_size.add_argument(
        '--master-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)
    worker_boot_disk_size.add_argument(
        '--worker-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-size',
                        type=arg_parsers.BinarySize(lower_bound='10GB'),
                        help=boot_disk_size_detailed_help)

    # Args that are visible only in Beta track
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """,
                        hidden=not beta)

    if beta:
        boot_disk_type_detailed_help = """\
        The type of the boot disk. The value must be ``pd-standard'' or
        ``pd-ssd''.
        """
        parser.add_argument('--master-boot-disk-type',
                            help=boot_disk_type_detailed_help)
        parser.add_argument('--worker-boot-disk-type',
                            help=boot_disk_type_detailed_help)
        parser.add_argument('--preemptible-worker-boot-disk-type',
                            help=boot_disk_type_detailed_help)
  def Args(parser):
    """Registers flags for connecting to an instance's serial port."""
    # BaseSSHCommand provides the common SSH flags; the full SSHCommand set
    # is deliberately avoided so --plain and --strict-host-key-checking are
    # not surfaced for this command.
    ssh_utils.BaseSSHCommand.Args(parser)

    parser.add_argument(
        '--dry-run',
        action='store_true',
        help=('If provided, the ssh command is printed to standard out '
              'rather than being executed.'))

    # --plain must still exist for shared code paths, but stays hidden here.
    parser.add_argument(
        '--plain',
        action='store_true',
        help=argparse.SUPPRESS)

    user_host_arg = parser.add_argument(
        'user_host',
        completion_resource='compute.instances',
        help='Specifies the user/instance for the serial port connection',
        metavar='[USER@]INSTANCE')
    user_host_arg.detailed_help = """\
        Specifies the user/instance for the serial port connection.

        ``USER'' specifies the username to authenticate as. If omitted,
        the current OS user is selected.
        """

    port_arg = parser.add_argument(
        '--port',
        help=('The number of the requested serial port. '
              'Can be 1-4, default is 1.'),
        type=arg_parsers.BoundedInt(1, 4))
    port_arg.detailed_help = """\
        The number of the requested serial port. Can be 1-4, default is 1.

        Instances can support up to four serial ports. By default, this
        command will connect to the first serial port. Setting this flag
        will connect to the requested serial port.
        """

    extra_args_arg = parser.add_argument(
        '--extra-args',
        help=('Extra key-value pairs to pass to the connection.'),
        type=arg_parsers.ArgDict(min_length=1),
        default={},
        metavar='KEY=VALUE')
    extra_args_arg.detailed_help = """\
        Optional arguments can be passed to the serial port connection by
        passing key-value pairs to this flag, such as max-connections=N or
        replay-lines=N. See {0} for additional options.
        """.format(SERIAL_PORT_HELP)

    # Hidden override of the gateway endpoint, for testing/debugging.
    parser.add_argument(
        '--serial-port-gateway',
        help=argparse.SUPPRESS,
        default=SERIAL_PORT_GATEWAY)

    flags.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='connect to')
示例#22
0
def _AddMinPortsPerVmArg(parser, for_create=False):
    """Adds an argument to specify the minimum number of ports per VM for NAT."""
    # The parser type enforces the API's lower bound of 2 ports per VM.
    port_count_parser = arg_parsers.BoundedInt(lower_bound=2)
    _AddClearableArgument(parser, for_create, 'min-ports-per-vm',
                          port_count_parser,
                          'Minimum ports to be allocated to a VM',
                          'Clear minimum ports to be allocated to a VM')
def AddAutoscalerArgs(parser,
                      multizonal_enabled=False,
                      queue_scaling_enabled=False):
    """Adds commandline arguments to parser.

    Args:
      parser: An argparse-like parser to register autoscaler flags on.
      multizonal_enabled: If True, register mutually exclusive --region/--zone
        scope flags; otherwise only --zone.
      queue_scaling_enabled: If True, also register the queue-based
        (Cloud Pub/Sub) scaling flags.
    """
    parser.add_argument(
        'name',
        metavar='NAME',
        completion_resource='compute.instanceGroupManagers',
        help='Managed instance group which autoscaling parameters will be set.'
    )
    parser.add_argument('--cool-down-period',
                        type=arg_parsers.Duration(),
                        help='Number of seconds Autoscaler will wait between '
                        'resizing collection.')
    parser.add_argument('--description', help='Notes about Autoscaler.')
    # sys.maxsize replaces the Python-2-only sys.maxint (AttributeError on
    # Python 3); on CPython 2.6+ the two values are identical.
    parser.add_argument('--min-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        help='Minimum number of replicas Autoscaler will set.')
    parser.add_argument('--max-num-replicas',
                        type=arg_parsers.BoundedInt(0, sys.maxsize),
                        required=True,
                        help='Maximum number of replicas Autoscaler will set.')
    parser.add_argument('--scale-based-on-cpu',
                        action='store_true',
                        help='Use autoscaling based on cpu utilization.')
    parser.add_argument('--scale-based-on-load-balancing',
                        action='store_true',
                        help=('Use autoscaling based on load balancing '
                              'utilization.'))
    parser.add_argument('--target-cpu-utilization',
                        type=arg_parsers.BoundedFloat(0.0, 1.0),
                        help='CPU utilization level Autoscaler will aim to '
                        'maintain (0.0 to 1.0).')
    parser.add_argument(
        '--target-load-balancing-utilization',
        type=arg_parsers.BoundedFloat(0.0, None),
        help='Load balancing utilization level Autoscaler will '
        'aim to maintain (greater than 0.0).')
    custom_metric_utilization = parser.add_argument(
        '--custom-metric-utilization',
        type=arg_parsers.ArgDict(spec={
            'metric': str,
            'utilization-target': float,
            'utilization-target-type': str,
        }, ),
        # pylint:disable=protected-access
        action=arg_parsers.FloatingListValuesCatcher(argparse._AppendAction),
        help=(
            'Adds target value of a Google Cloud Monitoring metric Autoscaler '
            'will aim to maintain.'),
        metavar='PROPERTY=VALUE',
    )
    # Fixed garbled detailed_help text: "for the to the Autoscaler".
    custom_metric_utilization.detailed_help = """
   Adds a target metric value for the Autoscaler.

   *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

   *utilization-target*::: Value of the metric Autoscaler will aim to maintain
   (greater than 0.0).

   *utilization-target-type*::: How target is expressed. Valid values: {0}.
  """.format(', '.join(_ALLOWED_UTILIZATION_TARGET_TYPES))

    if queue_scaling_enabled:
        cloud_pub_sub_spec = parser.add_argument(
            '--queue-scaling-cloud-pub-sub',
            type=arg_parsers.ArgDict(spec={
                'topic': str,
                'subscription': str,
            }, ),
            help='Scaling based on Cloud Pub/Sub queuing system.',
            metavar='PROPERTY=VALUE',
        )
        cloud_pub_sub_spec.detailed_help = """
     Specifies queue-based scaling based on a Cloud Pub/Sub queuing system.
     Both topic and subscription are required.

     *topic*::: Topic specification. Can be just a name or a partial URL
     (starting with "projects/..."). Topic must belong to the same project as
     Autoscaler.

     *subscription*::: Subscription specification. Can be just a name or a
     partial URL (starting with "projects/..."). Subscription must belong to the
     same project as Autoscaler and must be connected to the specified topic.
    """
        # Fixed help-text typo: "auotscaler" -> "autoscaler".
        parser.add_argument(
            '--queue-scaling-acceptable-backlog-per-instance',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Queue-based scaling target: autoscaler will aim '
            'to assure that average number of tasks in the queue '
            'is no greater than this value.',
        )
        parser.add_argument(
            '--queue-scaling-single-worker-throughput',
            type=arg_parsers.BoundedFloat(0.0, None),
            help='Hint the autoscaler for queue-based scaling on '
            'how much throughput a single worker instance is able '
            'to consume.')
    if multizonal_enabled:
        scope_parser = parser.add_mutually_exclusive_group()
        flags.AddRegionFlag(
            scope_parser,
            resource_type='resources',
            operation_type='update',
            explanation=flags.REGION_PROPERTY_EXPLANATION_NO_DEFAULT)
        flags.AddZoneFlag(
            scope_parser,
            resource_type='resources',
            operation_type='update',
            explanation=flags.ZONE_PROPERTY_EXPLANATION_NO_DEFAULT)
    else:
        flags.AddZoneFlag(parser,
                          resource_type='resources',
                          operation_type='update')
    '--scale-tier',
    help=(
        'Specifies the machine types, the number of replicas for workers and '
        'parameter servers.'),
    choices=_SCALE_TIER_CHOICES,
    default=None)
# Shared ML Engine job-logs flag definitions.
RUNTIME_VERSION = base.Argument(
    '--runtime-version',
    help=('The Google Cloud ML Engine runtime version for this job. '
          'Defaults to the latest stable version. See '
          'https://cloud.google.com/ml/docs/concepts/runtime-version-list for '
          'a list of accepted versions.'))

# sys.maxsize replaces the Python-2-only sys.maxint (AttributeError on
# Python 3); with unlimited=True the upper bound is effectively open anyway.
POLLING_INTERVAL = base.Argument(
    '--polling-interval',
    type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True),
    required=False,
    default=60,
    action=actions.StoreProperty(properties.VALUES.ml_engine.polling_interval),
    help='Number of seconds to wait between efforts to fetch the latest '
    'log messages.')
ALLOW_MULTILINE_LOGS = base.Argument(
    '--allow-multiline-logs',
    action='store_true',
    help='Output multiline log messages as single records.')
TASK_NAME = base.Argument(
    '--task-name',
    required=False,
    default=None,
    help='If set, display only the logs for this particular task.')
示例#25
0
def _Args(parser):
    """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
  """
    parser.add_argument('name', help='The name of this cluster.')
    # Timeout in seconds for the operation; hidden from help output.
    parser.add_argument('--timeout',
                        type=int,
                        default=1800,
                        help=argparse.SUPPRESS)
    flags.AddClustersWaitAndAsyncFlags(parser)
    parser.add_argument(
        '--num-nodes',
        type=arg_parsers.BoundedInt(1),
        help=
        'The number of nodes to be created in each of the cluster\'s zones.',
        default=3)
    parser.add_argument('--additional-zones',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='ZONE',
                        help="""\
The set of additional zones in which the specified node footprint should be
replicated. All zones must be in the same region as the cluster's primary zone.
If additional-zones is not specified, all nodes will be in the cluster's primary
zone.

Note that `NUM_NODES` nodes will be created in each zone, such that if you
specify `--num-nodes=4` and choose one additional zone, 8 nodes will be created.

Multiple locations can be specified, separated by commas. For example:

  $ {command} example-cluster --zone us-central1-a --additional-zones us-central1-b,us-central1-c
""")
    parser.add_argument(
        '--machine-type',
        '-m',
        help='The type of machine to use for nodes. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--subnetwork',
        help='The name of the Google Compute Engine subnetwork '
        '(https://cloud.google.com/compute/docs/subnetworks) to which the '
        'cluster is connected. If specified, the cluster\'s network must be a '
        '"custom subnet" network.')
    parser.add_argument(
        '--disable-addons',
        type=arg_parsers.ArgList(
            choices=[api_adapter.INGRESS, api_adapter.HPA]),
        help='List of cluster addons to disable. Options are {0}'.format(
            ', '.join([api_adapter.INGRESS, api_adapter.HPA])))
    parser.add_argument(
        '--network',
        help='The Compute Engine Network that the cluster will connect to. '
        'Google Container Engine will use this network when creating routes '
        'and firewalls for the clusters. Defaults to the \'default\' network.')
    parser.add_argument(
        '--cluster-ipv4-cidr',
        help='The IP address range for the pods in this cluster in CIDR '
        'notation (e.g. 10.0.0.0/14). Due to kube-proxy limitations, this range '
        'must be a subset of the 10.0.0.0/8 space. Defaults to server-specified.'
    )
    parser.add_argument(
        '--password',
        help='The password to use for cluster auth. Defaults to a '
        'server-specified randomly-generated string.')
    # Note: the {{command}} below is doubled because the literal is run
    # through .format() to substitute {aliases}.
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. The project's default
service account is used. Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/devstorage.read_only

  $ {{command}} example-cluster --scopes bigquery,storage-rw,compute-ro

Multiple SCOPEs can be specified, separated by commas. The scopes
necessary for the cluster to function properly (compute-rw, storage-ro),
are always added, even if not explicitly specified.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========
""".format(aliases=compute_constants.ScopesForHelp()))
    parser.add_argument(
        '--enable-cloud-endpoints',
        action='store_true',
        default=True,
        help='Automatically enable Google Cloud Endpoints to take advantage of '
        'API management features.')
    parser.add_argument('--enable-cloud-logging',
                        action='store_true',
                        default=True,
                        help='Automatically send logs from the cluster to the '
                        'Google Cloud Logging API.')
    # NOTE(review): this set_defaults looks redundant with default=True above;
    # kept because the parser is mocked and may capture the two differently.
    parser.set_defaults(enable_cloud_logging=True)
    parser.add_argument(
        '--enable-cloud-monitoring',
        action='store_true',
        default=True,
        help='Automatically send metrics from pods in the cluster to the '
        'Google Cloud Monitoring API. VM metrics will be collected by Google '
        'Compute Engine regardless of this setting.')
    # NOTE(review): same redundancy as enable_cloud_logging above.
    parser.set_defaults(enable_cloud_monitoring=True)
    parser.add_argument(
        '--disk-size',
        type=int,
        help='Size in GB for node VM boot disks. Defaults to 100GB.')
    parser.add_argument('--username',
                        '-u',
                        help='The user name to use for cluster auth.',
                        default='admin')
    parser.add_argument(
        '--max-nodes-per-pool',
        type=arg_parsers.BoundedInt(100, api_adapter.MAX_NODES_PER_POOL),
        help='The maximum number of nodes to allocate per default initial node '
        'pool. Container engine will automatically create enough nodes pools '
        'such that each node pool contains less than '
        '--max-nodes-per-pool nodes. Defaults to {nodes} nodes, but can be set '
        'as low as 100 nodes per pool on initial create.'.format(
            nodes=api_adapter.MAX_NODES_PER_POOL))
    flags.AddImageTypeFlag(parser, 'cluster')
    flags.AddNodeLabelsFlag(parser)
    flags.AddTagsFlag(
        parser, """\
Applies the given Compute Engine tags (comma separated) on all nodes in the new
node-pool. Example:

  $ {command} example-cluster --tags=tag1,tag2

New nodes, including ones created by resize or recreate, will have these tags
on the Compute Engine API instance object and can be used in firewall rules.
See https://cloud.google.com/sdk/gcloud/reference/compute/firewall-rules/create
for examples.
""")
示例#26
0
def ArgsForClusterRef(parser,
                      beta=False,
                      include_deprecated=True,
                      include_ttl_config=False,
                      include_gke_platform_args=False):
  """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
    include_ttl_config: whether to include Scheduled Delete(TTL) args
    include_gke_platform_args: whether to include GKE-based cluster args
  """
  labels_util.AddCreateLabelsFlags(parser)
  # 30m is backend timeout + 5m for safety buffer.
  flags.AddTimeoutFlag(parser, default='35m')
  flags.AddZoneFlag(parser, short_flags=include_deprecated)
  flags.AddComponentFlag(parser)

  # GCE-based and GKE-based platform flags are mutually exclusive.
  platform_group = parser.add_argument_group(mutex=True)
  gce_platform_group = platform_group.add_argument_group(help="""\
    Compute Engine options for Dataproc clusters.
    """)

  instances_flags.AddTagsArgs(gce_platform_group)
  gce_platform_group.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      default=None,
      help=('Metadata to be made available to the guest operating system '
            'running on the instances'),
      metavar='KEY=VALUE')

  # Either allow creating a single node cluster (--single-node), or specifying
  # the number of workers in the multi-node cluster (--num-workers and
  # --num-secondary-workers)
  node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
  node_group.add_argument(
      '--single-node',
      action='store_true',
      help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
  # Not mutually exclusive
  worker_group = node_group.add_argument_group(help='Multi-node cluster flags')
  worker_group.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
      'server-specified.')
  worker_group.add_argument(
      '--secondary-worker-type',
      metavar='TYPE',
      choices=['preemptible', 'non-preemptible'],
      default='preemptible',
      help='The type of the secondary worker group.')
  # The deprecated and the current flag are aliases; only one may be given.
  num_secondary_workers = worker_group.add_argument_group(mutex=True)
  num_secondary_workers.add_argument(
      '--num-preemptible-workers',
      action=actions.DeprecationAction(
          '--num-preemptible-workers',
          warn=('The `--num-preemptible-workers` flag is deprecated. '
                'Use the `--num-secondary-workers` flag instead.')),
      type=int,
      hidden=True,
      help='The number of preemptible worker nodes in the cluster.')
  num_secondary_workers.add_argument(
      '--num-secondary-workers',
      type=int,
      help='The number of secondary worker nodes in the cluster.')

  parser.add_argument(
      '--master-machine-type',
      help='The type of machine to use for the master. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
      'server-specified.')
  image_parser = parser.add_mutually_exclusive_group()
  # TODO(b/73291743): Add external doc link to --image
  image_parser.add_argument(
      '--image',
      metavar='IMAGE',
      help='The custom image used to create the cluster. It can '
           'be the image name, the image URI, or the image family URI, which '
           'selects the latest image from the family.')
  image_parser.add_argument(
      '--image-version',
      metavar='VERSION',
      help='The image version to use for the cluster. Defaults to the '
      'latest version.')
  parser.add_argument(
      '--bucket',
      help="""\
      The Google Cloud Storage bucket to use by default to stage job
      dependencies, miscellaneous config files, and job driver console output
      when using this cluster.
      """)

  netparser = gce_platform_group.add_argument_group(mutex=True)
  netparser.add_argument(
      '--network',
      help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
  netparser.add_argument(
      '--subnet',
      help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-master-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the master in a cluster.')
  secondary_worker_local_ssds = parser.add_argument_group(mutex=True)
  secondary_worker_local_ssds.add_argument(
      '--num-preemptible-worker-local-ssds',
      type=int,
      hidden=True,
      action=actions.DeprecationAction(
          '--num-preemptible-worker-local-ssds',
          warn=('The `--num-preemptible-worker-local-ssds` flag is deprecated. '
                'Use the `--num-secondary-worker-local-ssds` flag instead.')),
      help="""\
      The number of local SSDs to attach to each preemptible worker in
      a cluster.
      """)
  secondary_worker_local_ssds.add_argument(
      '--num-secondary-worker-local-ssds',
      type=int,
      help="""\
      The number of local SSDs to attach to each secondary worker in
      a cluster.
      """)
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(min_length=1),
      metavar='CLOUD_STORAGE_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help=('The maximum duration of each initialization action. See '
            '$ gcloud topic datetimes for information on duration formats.'))
  parser.add_argument(
      '--num-masters',
      type=arg_parsers.CustomFunctionValidator(
          lambda n: int(n) in [1, 3],
          'Number of masters must be 1 (Standard) or 3 (High Availability)',
          parser=arg_parsers.BoundedInt(1, 3)),
      help="""\
      The number of master nodes in the cluster.

      Number of Masters | Cluster Mode
      --- | ---
      1 | Standard
      3 | High Availability
      """)
  parser.add_argument(
      '--properties',
      type=arg_parsers.ArgDict(),
      action=arg_parsers.UpdateAction,
      default={},
      metavar='PREFIX:PROPERTY=VALUE',
      help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

Prefix | File | Purpose of file
--- | --- | ---
capacity-scheduler | capacity-scheduler.xml | Hadoop YARN Capacity Scheduler configuration
core | core-site.xml | Hadoop general configuration
distcp | distcp-default.xml | Hadoop Distributed Copy configuration
hadoop-env | hadoop-env.sh | Hadoop specific environment variables
hdfs | hdfs-site.xml | Hadoop HDFS configuration
hive | hive-site.xml | Hive configuration
mapred | mapred-site.xml | Hadoop MapReduce configuration
mapred-env | mapred-env.sh | Hadoop MapReduce specific environment variables
pig | pig.properties | Pig configuration
spark | spark-defaults.conf | Spark configuration
spark-env | spark-env.sh | Spark specific environment variables
yarn | yarn-site.xml | Hadoop YARN configuration
yarn-env | yarn-env.sh | Hadoop YARN specific environment variables

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
  gce_platform_group.add_argument(
      '--service-account',
      help='The Google Cloud IAM service account to be authenticated as.')
  gce_platform_group.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

  {minimum_scopes}

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

  {additional_scopes}

If you want to enable all scopes use the 'cloud-platform' scope.

{scopes_help}
""".format(
    minimum_scopes='\n  '.join(constants.MINIMUM_SCOPE_URIS),
    additional_scopes='\n  '.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
    scopes_help=compute_helpers.SCOPES_HELP))

  if include_deprecated:
    _AddDiskArgsDeprecated(parser)
  else:
    _AddDiskArgs(parser)

  # --no-address is an exception to the no negative-flag style guildline to be
  # consistent with gcloud compute instances create --no-address
  parser.add_argument(
      '--no-address',
      action='store_true',
      help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

  boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
  parser.add_argument(
      '--master-boot-disk-type', help=boot_disk_type_detailed_help)
  parser.add_argument(
      '--worker-boot-disk-type', help=boot_disk_type_detailed_help)
  secondary_worker_boot_disk_type = parser.add_argument_group(mutex=True)
  secondary_worker_boot_disk_type.add_argument(
      '--preemptible-worker-boot-disk-type',
      help=boot_disk_type_detailed_help,
      hidden=True,
      action=actions.DeprecationAction(
          '--preemptible-worker-boot-disk-type',
          warn=('The `--preemptible-worker-boot-disk-type` flag is deprecated. '
                'Use the `--secondary-worker-boot-disk-type` flag instead.')))
  secondary_worker_boot_disk_type.add_argument(
      '--secondary-worker-boot-disk-type', help=boot_disk_type_detailed_help)

  autoscaling_group = parser.add_argument_group()
  flags.AddAutoscalingPolicyResourceArgForCluster(
      autoscaling_group, api_version=('v1beta2' if beta else 'v1'))

  if include_ttl_config:
    parser.add_argument(
        '--max-idle',
        type=arg_parsers.Duration(),
        help="""\
          The duration before cluster is auto-deleted after last job completes,
          such as "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

    # --max-age and --expiration-time are two spellings of the same deadline.
    auto_delete_group = parser.add_mutually_exclusive_group()
    auto_delete_group.add_argument(
        '--max-age',
        type=arg_parsers.Duration(),
        help="""\
          The lifespan of the cluster before it is auto-deleted, such as
          "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

    auto_delete_group.add_argument(
        '--expiration-time',
        type=arg_parsers.Datetime.Parse,
        help="""\
          The time when cluster will be auto-deleted, such as
          "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
          information on time formats.
          """)

  AddKerberosGroup(parser)

  flags.AddMinCpuPlatformArgs(parser)

  _AddAcceleratorArgs(parser)

  AddReservationAffinityGroup(
      gce_platform_group,
      group_text='Specifies the reservation for the instance.',
      affinity_text='The type of reservation for the instance.')
  if include_gke_platform_args:
    gke_based_cluster_group = platform_group.add_argument_group(
        hidden=True,
        help="""\
          Options for creating a GKE-based Dataproc cluster. Specifying any of these
          will indicate that this cluster is intended to be a GKE-based cluster.
          These options are mutually exclusive with GCE-based options.
          """)
    gke_based_cluster_group.add_argument(
        '--gke-cluster',
        hidden=True,
        help="""\
            Required for GKE-based clusters. Specify the name of the GKE cluster to
            deploy this GKE-based Dataproc cluster to. This should be the short name
            and not the full path name.
            """)
    gke_based_cluster_group.add_argument(
        '--gke-cluster-namespace',
        hidden=True,
        help="""\
            Optional. Specify the name of the namespace to deploy Dataproc system
            components into. This namespace does not need to already exist.
            """)
示例#27
0
def AddExternalMasterGroup(parser):
  """Add flags to the parser for creating an external master and replica."""

  # Flags describing the external (source) primary instance itself.
  source_group = parser.add_group(
      required=False,
      help='Options for creating a wrapper for an external data source.')
  source_group.add_argument(
      '--source-ip-address',
      required=True,
      type=compute_utils.IPV4Argument,
      help=('Public IP address used to connect to and replicate from '
            'the external data source.'))
  source_group.add_argument(
      '--source-port',
      type=arg_parsers.BoundedInt(lower_bound=1, upper_bound=65535),
      default=3306,  # MySQL's standard port number.
      help=('Port number used to connect to and replicate from the '
            'external data source.'))

  # Flags describing the internal replica that mirrors the external primary.
  replica_group = parser.add_group(
      required=False,
      help=('Options for creating an internal replica of an external data '
            'source.'))
  replica_group.add_argument(
      '--master-username',
      required=True,
      help='Name of the replication user on the external data source.')

  # TODO(b/78648703): Make group required when mutex required status is fixed.
  # Exactly one way of supplying the replication user's password is allowed.
  password_group = replica_group.add_group('Password group.', mutex=True)
  password_group.add_argument(
      '--master-password',
      help='Password of the replication user on the external data source.')
  password_group.add_argument(
      '--prompt-for-master-password',
      action='store_true',
      help=('Prompt for the password of the replication user on the '
            'external data source. The password is all typed characters up '
            'to but not including the RETURN or ENTER key.'))
  replica_group.add_argument(
      '--master-dump-file-path',
      required=True,
      type=storage_util.ObjectReference.FromArgument,
      help=('Path to the MySQL dump file in Google Cloud Storage from '
            'which the seed import is made. The URI is in the form '
            'gs://bucketName/fileName. Compressed gzip files (.gz) are '
            'also supported.'))

  # SSL server credentials for connecting to the external primary.
  server_cred_group = replica_group.add_group(
      'Client and server credentials.', required=False)
  server_cred_group.add_argument(
      '--master-ca-certificate-path',
      required=True,
      help=('Path to a file containing the X.509v3 (RFC5280) PEM encoded '
            'certificate of the CA that signed the external data source\'s '
            'certificate.'))

  # Client certificate/key pair used to authenticate against the primary.
  client_cred_group = server_cred_group.add_group(
      'Client credentials.', required=False)
  client_cred_group.add_argument(
      '--client-certificate-path',
      required=True,
      help=('Path to a file containing the X.509v3 (RFC5280) PEM encoded '
            'certificate that will be used by the replica to authenticate '
            'against the external data source.'))
  client_cred_group.add_argument(
      '--client-key-path',
      required=True,
      help=('Path to a file containing the unencrypted PKCS#1 or PKCS#8 '
            'PEM encoded private key associated with the '
            'clientCertificate.'))
示例#28
0
 def Args(parser):
     """Declares flags for updating storage, maintenance, and machine size."""
     _BasePatch.Args(parser)
     # default=None is a tri-state: leaving the flag unset leaves the
     # instance's current setting untouched.
     base.Argument(
         '--storage-auto-increase',
         action='store_true',
         default=None,
         help=('Storage size can be increased, but it cannot be '
               'decreased; storage increases are permanent for the life of the '
               'instance. With this setting enabled, a spike in storage requirements '
               'can result in permanently increased storage costs for your instance. '
               'However, if an instance runs out of available space, it can result in '
               'the instance going offline, dropping existing connections.')
     ).AddToParser(parser)
     parser.add_argument(
         '--storage-size',
         type=arg_parsers.BinarySize(
             lower_bound='10GB',
             upper_bound='10230GB',
             suggested_binary_size_scales=['GB']),
         help=('Amount of storage allocated to the instance. Must be an integer '
               'number of GB between 10GB and 10230GB inclusive.'))
     # Channel names are normalized to lower case via type=str.lower.
     release_channel_choices = {
         'production': ('Production updates are stable and recommended '
                        'for applications in production.'),
         'preview': ('Preview updates release prior to production '
                     'updates. You may wish to use the preview channel '
                     'for dev/test applications so that you can preview '
                     'their compatibility with your application prior '
                     'to the production release.'),
     }
     parser.add_argument(
         '--maintenance-release-channel',
         choices=release_channel_choices,
         type=str.lower,
         help=("Which channel's updates to apply during the maintenance "
               "window."))
     parser.add_argument(
         '--maintenance-window-day',
         choices=arg_parsers.DayOfWeek.DAYS,
         type=arg_parsers.DayOfWeek.Parse,
         help='Day of week for maintenance window, in UTC time zone.')
     parser.add_argument(
         '--maintenance-window-hour',
         type=arg_parsers.BoundedInt(lower_bound=0, upper_bound=23),
         help='Hour of day for maintenance window, in UTC time zone.')
     parser.add_argument(
         '--maintenance-window-any',
         action='store_true',
         help='Removes the user-specified maintenance window.')
     # --cpu and --memory together select a custom machine type; they must be
     # used without --tier.
     parser.add_argument(
         '--cpu',
         type=int,
         required=False,
         help=('A whole number value indicating how many cores are desired in '
               'the machine. Both --cpu and --memory must be specified if a custom '
               'machine type is desired, and the --tier flag must be omitted.'))
     parser.add_argument(
         '--memory',
         type=arg_parsers.BinarySize(),
         required=False,
         help=('A whole number value indicating how much memory is desired in '
               'the machine. A size unit should be provided (eg. 3072MiB or 9GiB) - '
               'if no units are specified, GiB is assumed. Both --cpu and --memory '
               'must be specified if a custom machine type is desired, and the --tier '
               'flag must be omitted.'))
示例#29
0
 def Args(parser):
     """Register flags for this command."""
     base.LIMIT_FLAG.RemoveFromParser(parser)
     # The positional is optional: with no name, every function's logs show.
     parser.add_argument(
         'name',
         nargs='?',
         help=('Name of the function which logs are to be displayed. If no name '
               'is specified, logs from all functions are displayed.'))
     parser.add_argument(
         '--execution-id',
         help='Execution ID for which logs are to be displayed.')
     parser.add_argument(
         '--start-time',
         required=False,
         type=arg_parsers.Datetime.Parse,
         help=('Return only log entries which timestamps are not earlier than '
               'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
               'format. If --start-time is specified, the command returns '
               '--limit earliest log entries which appeared after '
               '--start-time.'))
     parser.add_argument(
         '--end-time',
         required=False,
         type=arg_parsers.Datetime.Parse,
         help=('Return only log entries which timestamps are not later than '
               'the specified time. The timestamp must be in RFC3339 UTC "Zulu" '
               'format. If --end-time is specified but --start-time is not, the '
               'command returns --limit latest log entries which appeared '
               'before --end-time.'))
     parser.add_argument(
         '--limit',
         required=False,
         type=arg_parsers.BoundedInt(1, 1000),
         default=20,
         help=('Number of log entries to be fetched; must not be greater than '
               '1000.'))
     parser.add_argument(
         '--min-log-level',
         choices=GetLogs.SEVERITIES,
         help=('Minimum level of logs to be fetched; can be one of DEBUG, INFO, '
               'ERROR.'))
     # NOTE(review): the --show-* toggles are store_true with default=True;
     # presumably the surrounding CLI framework supplies --no-* variants to
     # turn them off — confirm against the framework.
     parser.add_argument(
         '--show-log-levels',
         action='store_true',
         default=True,
         help='Print a log level of each log entry.')
     parser.add_argument(
         '--show-function-names',
         action='store_true',
         default=True,
         help='Print a function name before each log entry.')
     parser.add_argument(
         '--show-execution-ids',
         action='store_true',
         default=True,
         help='Print an execution ID before each log entry.')
     parser.add_argument(
         '--show-timestamps',
         action='store_true',
         default=True,
         help='Print a UTC timestamp before each log entry.')
示例#30
0
def AddSubscriptionSettingsFlags(parser,
                                 is_update=False,
                                 support_message_ordering=False,
                                 support_dead_letter_queues=False):
    """Adds the flags for creating or updating a subscription.

  Args:
    parser: The argparse parser.
    is_update: Whether or not this is for the update operation (vs. create).
    support_message_ordering: Whether or not flags for ordering should be added.
    support_dead_letter_queues: Whether or not flags for dead letter queues
      should be added.
  """
    AddAckDeadlineFlag(parser)
    AddPushConfigFlags(parser)
    AddMessageRetentionFlags(parser, is_update)
    if support_message_ordering and not is_update:
        parser.add_argument(
            '--enable-message-ordering',
            action='store_true',
            default=None,
            help=
            """Whether or not to receive messages with the same ordering key in
            order. If true, messages with the same ordering key will by sent to
            subscribers in the order in which they were received by Cloud
            Pub/Sub.""")
    if support_dead_letter_queues:
        current_group = parser
        if is_update:
            mutual_exclusive_group = current_group.add_mutually_exclusive_group(
            )
            mutual_exclusive_group.add_argument(
                '--clear-dead-letter-policy',
                action='store_true',
                default=None,
                help=
                """If set, clear the dead letter policy from the subscription."""
            )
            current_group = mutual_exclusive_group

        set_dead_letter_policy_group = current_group.add_argument_group(
            'Dead Letter Queue Options')
        set_dead_letter_policy_group.add_argument(
            '--dead-letter-topic',
            type=str,
            default=None,
            help="""Name of the topic to which dead letter messages should be
            published. Format is `projects/{project}/topics/{topic}`. The Cloud
            Pub/Sub service account associated with the enclosing
            subscription's parent project (i.e.,
            service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com)
            must have permission to Publish() to this topic and Acknowledge()
            messages on this subscription.""")
        set_dead_letter_policy_group.add_argument(
            '--max-delivery-attempts',
            type=arg_parsers.BoundedInt(5, 100),
            default=None,
            help=
            """Maximum number of delivery attempts for any message. The value
            must be between 5 and 100. Defaults to 5. `--dead-letter-topic`
            must also be specified.""")
    parser.add_argument(
        '--expiration-period',
        type=ParseExpirationPeriodWithNeverSentinel,
        help="""The subscription will expire if it is inactive for the given
          period. Valid values are strings of the form INTEGER[UNIT], where
          UNIT is one of "s", "m", "h", and "d" for seconds, minutes, hours,
          and days, respectively. If the unit is omitted, seconds is
          assumed. This flag additionally accepts the special value "never" to
          indicate that the subscription will never expire.""")