def _CommonArgs(parser): """Register flags common to all tracks.""" base.ASYNC_FLAG.AddToParser(parser) # Allow the user to specify new labels as well as update/remove existing labels_util.AddUpdateLabelsFlags(parser) # Updates can take hours if a lot of data needs to be moved on HDFS util.AddTimeoutFlag(parser, default='3h') parser.add_argument('name', help='The name of the cluster to update.') parser.add_argument('--num-workers', type=int, help='The new number of worker nodes in the cluster.') parser.add_argument( '--num-preemptible-workers', type=int, help='The new number of preemptible worker nodes in the cluster.')
def Args(parser):
  parser.add_argument('name', help='The name of the cluster to delete.')
  base.ASYNC_FLAG.AddToParser(parser)
  util.AddTimeoutFlag(parser)
def Args(parser):
  util.AddTimeoutFlag(parser)
  parser.add_argument('name', help='The name of the cluster to diagnose.')
def _CommonArgs(parser): """Register flags common to all tracks.""" instances_flags.AddTagsArgs(parser) base.ASYNC_FLAG.AddToParser(parser) labels_util.AddCreateLabelsFlags(parser) # 30m is backend timeout + 5m for safety buffer. util.AddTimeoutFlag(parser, default='35m') parser.add_argument( '--metadata', type=arg_parsers.ArgDict(min_length=1), action='append', default=None, help=('Metadata to be made available to the guest operating system ' 'running on the instances'), metavar='KEY=VALUE') parser.add_argument('name', help='The name of this cluster.') parser.add_argument( '--num-workers', type=int, help='The number of worker nodes in the cluster. Defaults to ' 'server-specified.') parser.add_argument( '--num-preemptible-workers', type=int, help='The number of preemptible worker nodes in the cluster.') parser.add_argument( '--master-machine-type', help='The type of machine to use for the master. Defaults to ' 'server-specified.') parser.add_argument( '--worker-machine-type', help='The type of machine to use for workers. Defaults to ' 'server-specified.') parser.add_argument('--image', hidden=True) parser.add_argument( '--image-version', metavar='VERSION', help='The image version to use for the cluster. Defaults to the ' 'latest version.') parser.add_argument( '--bucket', help='The Google Cloud Storage bucket to use with the Google Cloud ' 'Storage connector. A bucket is auto created when this parameter is ' 'not specified.') netparser = parser.add_mutually_exclusive_group() netparser.add_argument( '--network', help="""\ The Compute Engine network that the VM instances of the cluster will be part of. This is mutually exclusive with --subnet. If neither is specified, this defaults to the "default" network. """) netparser.add_argument( '--subnet', help="""\ Specifies the subnet that the cluster will be part of. This is mutally exclusive with --network. """) parser.add_argument( '--zone', '-z', help='The compute zone (e.g. us-central1-a) for the cluster.', action=actions.StoreProperty(properties.VALUES.compute.zone)) parser.add_argument( '--num-worker-local-ssds', type=int, help='The number of local SSDs to attach to each worker in a cluster.') parser.add_argument( '--num-master-local-ssds', type=int, help='The number of local SSDs to attach to the master in a cluster.') parser.add_argument( '--initialization-actions', type=arg_parsers.ArgList(min_length=1), metavar='CLOUD_STORAGE_URI', help=('A list of Google Cloud Storage URIs of ' 'executables to run on each node in the cluster.')) parser.add_argument( '--initialization-action-timeout', type=arg_parsers.Duration(), metavar='TIMEOUT', default='10m', help='The maximum duration of each initialization action.') parser.add_argument( '--properties', type=arg_parsers.ArgDict(), metavar='PREFIX:PROPERTY=VALUE', default={}, help="""\ Specifies configuration properties for installed packages, such as Hadoop and Spark. Properties are mapped to configuration files by specifying a prefix, such as "core:io.serializations". 
The following are supported prefixes and their mappings: [format="csv",options="header"] |======== Prefix,Target Configuration File core,core-site.xml hdfs,hdfs-site.xml mapred,mapred-site.xml yarn,yarn-site.xml hive,hive-site.xml pig,pig.properties spark,spark-defaults.conf |======== """) parser.add_argument( '--service-account', help='The Google Cloud IAM service account to be authenticated as.') parser.add_argument( '--scopes', type=arg_parsers.ArgList(min_length=1), metavar='SCOPE', help="""\ Specifies scopes for the node instances. The project's default service account is used. Multiple SCOPEs can specified, separated by commas. Examples: $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin $ {{command}} example-cluster --scopes sqlservice,bigquery The following scopes necessary for the cluster to function properly are always added, even if not explicitly specified: [format="csv"] |======== {minimum_scopes} |======== If this flag is not specified the following default scopes are also included: [format="csv"] |======== {additional_scopes} |======== If you want to enable all scopes use the 'cloud-platform' scope. SCOPE can be either the full URI of the scope or an alias. Available aliases are: [format="csv",options="header"] |======== Alias,URI {aliases} |======== """.format( minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS), additional_scopes='\n'.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS), aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP)) master_boot_disk = parser.add_mutually_exclusive_group() worker_boot_disk = parser.add_mutually_exclusive_group() # Deprecated, to be removed at a future date. master_boot_disk.add_argument( '--master-boot-disk-size-gb', type=int, hidden=True) worker_boot_disk.add_argument( '--worker-boot-disk-size-gb', type=int, hidden=True) boot_disk_size_detailed_help = """\ The size of the boot disk. The value must be a whole number followed by a size unit of ``KB'' for kilobyte, ``MB'' for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example, ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk can have is 10 GB. Disk size must be a multiple of 1 GB. """ master_boot_disk.add_argument( '--master-boot-disk-size', type=arg_parsers.BinarySize(lower_bound='10GB'), help=boot_disk_size_detailed_help) worker_boot_disk.add_argument( '--worker-boot-disk-size', type=arg_parsers.BinarySize(lower_bound='10GB'), help=boot_disk_size_detailed_help) parser.add_argument( '--preemptible-worker-boot-disk-size', type=arg_parsers.BinarySize(lower_bound='10GB'), help="""\ The size of the boot disk. The value must be a whole number followed by a size unit of ``KB'' for kilobyte, ``MB'' for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example, ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk can have is 10 GB. Disk size must be a multiple of 1 GB. """)
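# A stdlib-only analogue of the --properties flag registered above, shown to
# illustrate how a PREFIX:PROPERTY=VALUE dict flag parses. This mirrors, but
# does not use, calliope's arg_parsers.ArgDict; the helper and parser names
# below are hypothetical and exist only for this sketch.
import argparse


def _ParsePropertiesDict(value):
  """Parses 'core:io.serializations=x,spark:spark.executor.cores=2' to a dict."""
  result = {}
  for pair in value.split(','):
    key, sep, val = pair.partition('=')
    if not sep or not key:
      raise argparse.ArgumentTypeError(
          'expected a PREFIX:PROPERTY=VALUE pair, got %r' % pair)
    result[key] = val
  return result


_demo_parser = argparse.ArgumentParser()
_demo_parser.add_argument('--properties', type=_ParsePropertiesDict, default={})
_demo_args = _demo_parser.parse_args(
    ['--properties', 'spark:spark.executor.cores=2,hdfs:dfs.replication=1'])
assert _demo_args.properties == {
    'spark:spark.executor.cores': '2', 'hdfs:dfs.replication': '1'}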
def Args(parser):
  flags.AddTemplateFlag(parser, 'run')
  util.AddTimeoutFlag(parser, default='35m')
  base.ASYNC_FLAG.AddToParser(parser)
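# A stdlib-only sketch of how duration strings such as the '35m' default above
# (or the '3h' update timeout) can be converted to seconds. Calliope's
# arg_parsers.Duration performs a similar, more complete conversion; the
# helper below is hypothetical and exists only for this illustration.
import re

_UNIT_SECONDS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}


def _ParseDuration(value):
  """Converts a duration such as '30s', '35m', or '3h' into seconds."""
  match = re.match(r'^(\d+)([smhd])$', value)
  if not match:
    raise ValueError('expected a duration such as 30s, 35m, or 3h')
  return int(match.group(1)) * _UNIT_SECONDS[match.group(2)]


assert _ParseDuration('35m') == 2100
assert _ParseDuration('3h') == 10800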