def Args(parser):
  """Registers the file-copy positionals and the --zone flag."""
  ssh_utils.BaseSSHCLICommand.Args(parser)
  parser.add_argument(
      'sources',
      metavar='[[USER@]INSTANCE:]SRC',
      nargs='+',
      help='Specifies a source file.')
  parser.add_argument(
      'destination',
      metavar='[[USER@]INSTANCE:]DEST',
      help='Specifies a destination for the source files.')
  # TODO(user): Use utils.AddZoneFlag when copy_files supports URIs
  zone_arg = parser.add_argument(
      '--zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help='The zone of the instance to copy files to/from.')
  zone_arg.detailed_help = (
      'The zone of the instance to copy files to/from. If omitted, '
      'you will be prompted to select a zone.')
def Args(parser):
  """Registers the file-copy positionals and the --zone flag."""
  ssh_utils.BaseSSHCLICommand.Args(parser)
  parser.add_argument(
      'sources',
      metavar='[[USER@]INSTANCE:]SRC',
      nargs='+',
      help='Specifies a source file.')
  parser.add_argument(
      'destination',
      metavar='[[USER@]INSTANCE:]DEST',
      help='Specifies a destination for the source files.')
  # TODO(b/36053572): Use flags.AddZoneFlag when copy_files supports URIs
  zone_help = """\
The zone of the instance to copy files to/from. If omitted, you will be
prompted to select a zone.
""" + flags.ZONE_PROPERTY_EXPLANATION
  parser.add_argument(
      '--zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help=zone_help)
def Args(parser):
  """Registers scp-specific flags plus the source/destination positionals."""
  ssh_utils.BaseSSHCLICommand.Args(parser)
  parser.add_argument('--port', help='The port to connect to.')
  parser.add_argument(
      '--recurse',
      action='store_true',
      help='Upload directories recursively.')
  parser.add_argument(
      '--compress',
      action='store_true',
      help='Enable compression.')
  parser.add_argument(
      '--scp-flag',
      action='append',
      help='Extra flag to be sent to scp. This flag may be repeated.')
  parser.add_argument(
      'sources',
      metavar='[[USER@]INSTANCE:]SRC',
      nargs='+',
      help='Specifies the files to copy.')
  parser.add_argument(
      'destination',
      metavar='[[USER@]INSTANCE:]DEST',
      help='Specifies a destination for the source files.')
  # TODO(b/21515936): Use flags.AddZoneFlag when copy_files supports URIs
  parser.add_argument(
      '--zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help=('The zone of the instance to copy files to/from.\n\n' +
            flags.ZONE_PROPERTY_EXPLANATION))
def AddRegionFlag(parser, resource_type, operation_type, flag_prefix=None,
                  explanation=REGION_PROPERTY_EXPLANATION, help_text=None,
                  hidden=False, plural=False, custom_plural=None):
  """Adds a --region flag to the given parser.

  Args:
    parser: argparse parser.
    resource_type: str, human readable name for the resource type this flag is
      qualifying, for example "instance group".
    operation_type: str, human readable name for the operation, for example
      "update" or "delete".
    flag_prefix: str, flag will be named --{flag_prefix}-region.
    explanation: str, detailed explanation of the flag.
    help_text: str, help text will be overridden with this value.
    hidden: bool, If True, --region argument help will be hidden.
    plural: bool, resource_type will be pluralized or not depending on value.
    custom_plural: str, If plural is True then this string will be used as
      resource types, otherwise resource_types will be pluralized by appending
      's'.
  """
  # int(plural) + 1 maps False -> singular (1) and True -> plural (2).
  noun = text.Pluralize(int(plural) + 1, resource_type or '', custom_plural)
  brief = 'Region of the {0} to {1}.'.format(noun, operation_type)
  # With a prefix the flag becomes --<prefix>-region instead of --region.
  flag_name = 'region' if flag_prefix is None else flag_prefix + '-region'
  parser.add_argument(
      '--' + flag_name,
      completer=completers.RegionsCompleter,
      action=actions.StoreProperty(properties.VALUES.compute.region),
      hidden=hidden,
      help=help_text or '{0} {1}'.format(brief, explanation))
def Args(parser):
  """Registers the global --project, --quiet and trace flags."""
  project_arg = parser.add_argument(
      '--project',
      metavar='PROJECT_ID',
      dest='project',
      action=actions.StoreProperty(properties.VALUES.core.project),
      help='Google Cloud Platform project ID to use for this invocation.')
  # Attach a remote completer so project IDs can be tab-completed.
  cli = Gcloud.GetCLIGenerator()
  collection = 'cloudresourcemanager.projects'
  project_arg.completer = (
      remote_completion.RemoteCompletion.GetCompleterForResource(
          collection, cli, 'alpha.projects'))
  project_arg.detailed_help = """\
The Google Cloud Platform project name to use for this invocation. If
omitted then the current project is assumed.
"""
  # Must have a None default so properties are not always overridden when the
  # arg is not provided.
  quiet_arg = parser.add_argument(
      '--quiet', '-q',
      default=None,
      action=actions.StoreConstProperty(
          properties.VALUES.core.disable_prompts, True),
      help='Disable all interactive prompts.')
  quiet_arg.detailed_help = """\
Disable all interactive prompts when running gcloud commands. If input
is required, defaults will be used, or an error will be raised.
"""
  trace_group = parser.add_mutually_exclusive_group()
  trace_group.add_argument(
      '--trace-token',
      default=None,
      help='Token used to route traces of service requests for investigation'
      ' of issues.')
def Args(parser):
  """Set up arguments for this command.

  Args:
    parser: An argparse.ArgumentParser.
  """
  super(BaseScpHelper, BaseScpHelper).Args(parser)
  parser.add_argument(
      'sources',
      completer=FilesCompleter,
      metavar='[[USER@]INSTANCE:]SRC',
      nargs='+',
      help='Specifies the files to copy.')
  parser.add_argument(
      'destination',
      metavar='[[USER@]INSTANCE:]DEST',
      help='Specifies a destination for the source files.')
  parser.add_argument(
      '--zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help=('The zone of the instance to copy files to/from.\n\n' +
            flags.ZONE_PROPERTY_EXPLANATION))
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  parser.add_argument(
      'name',
      metavar='NAME',
      help='The name of the node pool to delete.')
  # Hidden operation timeout (seconds).
  parser.add_argument(
      '--timeout',
      type=int,
      default=1800,
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--wait',
      action='store_true',
      default=True,
      help='Poll the operation for completion after issuing a delete '
      'request.')
  parser.add_argument(
      '--cluster',
      action=actions.StoreProperty(properties.VALUES.container.cluster),
      help='The cluster from which to delete the node pool.')
def Args(parser):
  """Set up arguments for this command.

  Args:
    parser: An argparse.ArgumentParser.
  """
  super(BaseScpCommand, BaseScpCommand).Args(parser)
  parser.add_argument(
      'sources',
      metavar='[[USER@]INSTANCE:]SRC',
      nargs='+',
      help='Specifies the files to copy.')
  parser.add_argument(
      'destination',
      metavar='[[USER@]INSTANCE:]DEST',
      help='Specifies a destination for the source files.')
  # TODO(b/21515936): Use flags.AddZoneFlag when copy_files supports URIs.
  parser.add_argument(
      '--zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help=('The zone of the instance to copy files to/from.\n\n' +
            flags.ZONE_PROPERTY_EXPLANATION))
def Args(parser):
  """Registers the flags for creating a Dataproc cluster."""
  parser.add_argument('name', help='The name of this cluster.')
  parser.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--num-preemptible-workers',
      type=int,
      help='The number of preemptible worker nodes in the cluster.')
  parser.add_argument(
      '--master-machine-type',
      help='The type of machine to use for the master. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
      'server-specified.')
  # Hidden from help output (argparse.SUPPRESS).
  parser.add_argument('--image', help=argparse.SUPPRESS)
  parser.add_argument(
      '--bucket',
      help='The GCS bucket to use with the GCS connector. A bucket is auto '
      'created when this parameter is not specified.')
  parser.add_argument(
      '--network',
      help='The Compute Engine network that the cluster will connect to. '
      'Google Cloud Dataproc will use this network when creating routes '
      'and firewalls for the clusters. Defaults to the \'default\' network.')
  parser.add_argument(
      '--zone', '-z',
      help='The compute zone (e.g. us-central1-a) for the cluster.',
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-master-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the master in a cluster.')
  parser.add_argument(
      '--worker-boot-disk-size-gb',
      type=int,
      help='The size in GB of the boot disk of each worker in a cluster.')
  parser.add_argument(
      '--master-boot-disk-size-gb',
      type=int,
      help='The size in GB of the boot disk of the master in a cluster.')
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(),
      metavar='GCS_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help='The maximum duration of each initialization action.')
def __AddBuiltinGlobalFlags(self, top_element):
  """Adds in calliope builtin global flags.

  This needs to happen immediately after the top group is loaded and before
  any other groups are loaded. The flags must be present so when sub groups
  are loaded, the flags propagate down.

  Args:
    top_element: backend._CommandCommon, The root of the command tree.
  """
  if self.__version_func is not None:
    # --version exits immediately after printing (FunctionExitAction).
    # pylint: disable=protected-access
    version_flag = top_element.ai.add_argument(
        '-v', '--version',
        action=actions.FunctionExitAction(self.__version_func),
        help='Print version information.')
    # global_only keeps the flag from propagating to sub groups.
    version_flag.global_only = True
  # pylint: disable=protected-access
  top_element.ai.add_argument(
      '--verbosity',
      choices=log.OrderedVerbosityNames(),
      default=None,
      help=(
          'Override the default verbosity for this command. This must be '
          'a standard logging verbosity level: [{values}] (Default: '
          '[{default}]).').format(
              values=', '.join(log.OrderedVerbosityNames()),
              default=log.DEFAULT_VERBOSITY_STRING),
      action=actions.StoreProperty(properties.VALUES.core.verbosity))
  top_element.ai.add_argument(
      '--user-output-enabled',
      default=None,
      choices=('true', 'false'),
      help=(
          'Control whether user intended output is printed to the console. '
          '(true/false)'),
      action=actions.StoreProperty(
          properties.VALUES.core.user_output_enabled))
  format_flag = top_element.ai.add_argument(
      '--format',
      help='Format for printed output.',
      choices=resource_printer.SUPPORTED_FORMATS)
  format_flag.detailed_help = """\
Specify a format for printed output. By default, a command-specific
human-friendly output format is used. Setting this flag to one of the
available options will serialize the result of the command in the chosen
format and print it to stdout. Supported formats are:
`{0}`.""".format('`, `'.join(resource_printer.SUPPORTED_FORMATS))
  # Logs all HTTP server requests and responses to stderr.
  top_element.ai.add_argument(
      '--log-http',
      action='store_true',
      default=None,
      help='Logs all HTTP server requests and responses to stderr.')
  # Timeout value for HTTP requests; hidden from help output.
  top_element.ai.add_argument(
      '--http-timeout',
      default=None,
      type=float,
      help=argparse.SUPPRESS)
def _Args(parser):
  """Register flags for this command.

  Fixes relative to the previous version: `constants.SCOPES.iteritems()` is
  Python 2 only and raises AttributeError under Python 3; `.items()` behaves
  identically in both. Also corrects the help-text typo "can specified".

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  parser.add_argument('name', help='The name of the node pool to create.')
  parser.add_argument(
      '--cluster',
      help='The cluster to add the node pool to.',
      action=actions.StoreProperty(properties.VALUES.container.cluster))
  parser.add_argument(
      '--enable-cloud-endpoints',
      action='store_true',
      default=True,
      help='Automatically enable Google Cloud Endpoints to take advantage of '
      'API management features.')
  # Timeout in seconds for operation; hidden from help output.
  parser.add_argument(
      '--timeout',
      type=int,
      default=1800,
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--num-nodes',
      type=int,
      help='The number of nodes in the node pool in each of the '
      'cluster\'s zones.',
      default=3)
  parser.add_argument(
      '--machine-type', '-m',
      help='The type of machine to use for nodes. Defaults to '
      'server-specified')
  parser.add_argument(
      '--disk-size',
      type=int,
      help='Size in GB for node VM boot disks. Defaults to 100GB.')
  parser.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. The project's default
service account is used. Examples:

  $ {{command}} node-pool-1 --cluster=example-cluster --scopes https://www.googleapis.com/auth/devstorage.read_only

  $ {{command}} node-pool-1 --cluster=example-cluster --scopes bigquery,storage-rw,compute-ro

Multiple SCOPEs can be specified, separated by commas. The scopes
necessary for the cluster to function properly (compute-rw,
storage-ro), are always added, even if not explicitly specified.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[options="header",format="csv",grid="none",frame="none"]
|========
Alias,URI
{aliases}
|========
""".format(aliases='\n'.join(
    # Each entry is an (alias, uri) pair rendered as one CSV row.
    ','.join(value) for value in sorted(constants.SCOPES.items()))))
  parser.add_argument(
      '--tags',
      help=argparse.SUPPRESS,
      type=arg_parsers.ArgList(min_length=1),
      metavar='TAGS')
  flags.AddImageTypeFlag(parser, 'node pool')
  flags.AddNodeLabelsFlag(parser, for_node_pool=True)
def __AddBuiltinGlobalFlags(self, top_element):
  """Adds in calliope builtin global flags.

  This needs to happen immediately after the top group is loaded and before
  any other groups are loaded. The flags must be present so when sub groups
  are loaded, the flags propagate down.

  Args:
    top_element: backend._CommandCommon, The root of the command tree.
  """
  if self.__version_func is not None:
    # --version exits immediately after printing (FunctionExitAction).
    top_element.ai.add_argument(
        '-v', '--version',
        do_not_propagate=True,
        category=calliope_base.COMMONLY_USED_FLAGS,
        action=actions.FunctionExitAction(self.__version_func),
        help='Print version information and exit. This flag is only available'
        ' at the global level.')
  top_element.ai.add_argument(
      '--configuration',
      metavar='CONFIGURATION',
      category=calliope_base.COMMONLY_USED_FLAGS,
      help="""\
The configuration to use for this command invocation. For more
information on how to use configurations, run:
`gcloud topic configurations`.  You can also use the [{0}] environment
variable to set the equivalent of this flag for a terminal
session.""".format(config.CLOUDSDK_ACTIVE_CONFIG_NAME))
  top_element.ai.add_argument(
      '--verbosity',
      choices=log.OrderedVerbosityNames(),
      default=log.DEFAULT_VERBOSITY_STRING,
      category=calliope_base.COMMONLY_USED_FLAGS,
      help='Override the default verbosity for this command.',
      action=actions.StoreProperty(properties.VALUES.core.verbosity))
  # This should be a pure Boolean flag, but the alternate true/false explicit
  # value form is preserved for backwards compatibility. This flag is the
  # only Cloud SDK outlier.
  # TODO(b/24095744): Add true/false deprecation message.
  top_element.ai.add_argument(
      '--user-output-enabled',
      metavar=' ',  # Help text will look like the flag does not have a value.
      nargs='?',
      default=None,  # Tri-valued, None => don't override the property.
      const='true',
      choices=('true', 'false'),
      action=actions.StoreBooleanProperty(
          properties.VALUES.core.user_output_enabled),
      help='Print user intended output to the console.')
  top_element.ai.add_argument(
      '--flatten',
      metavar='KEY',
      default=None,
      type=arg_parsers.ArgList(),
      category=calliope_base.COMMONLY_USED_FLAGS,
      help="""\
Flatten _name_[] output resource slices in _KEY_ into separate records
for each item in each slice. Multiple keys and slices may be specified.
This also flattens keys for *--format* and *--filter*. For example,
*--flatten=abc.def[]* flattens *abc.def[].ghi* references to
*abc.def.ghi*. A resource record containing *abc.def[]* with N elements
will expand to N records in the flattened output. This flag interacts
with other flags that are applied in this order: *--flatten*,
*--sort-by*, *--filter*, *--limit*.""")
  top_element.ai.add_argument(
      '--format',
      default=None,
      category=calliope_base.COMMONLY_USED_FLAGS,
      help="""\
Sets the format for printing command output resources. The default is a
command-specific human-friendly output format. The supported formats
are: `{0}`. For more details run $ gcloud topic formats.""".format(
    '`, `'.join(resource_printer.SupportedFormats())))
  top_element.ai.add_argument(
      '--log-http',
      default=None,  # Tri-valued, None => don't override the property.
      action=actions.StoreBooleanProperty(
          properties.VALUES.core.log_http),
      help='Log all HTTP server requests and responses to stderr.')
  # Hidden auth-override flags (argparse.SUPPRESS keeps them out of help).
  top_element.ai.add_argument(
      '--authority-selector',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.auth.authority_selector),
      help=argparse.SUPPRESS)
  top_element.ai.add_argument(
      '--authorization-token-file',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.auth.authorization_token_file),
      help=argparse.SUPPRESS)
  top_element.ai.add_argument(
      '--credential-file-override',
      action=actions.StoreProperty(
          properties.VALUES.auth.credential_file_override),
      help=argparse.SUPPRESS)
  # Timeout value for HTTP requests.
  top_element.ai.add_argument(
      '--http-timeout',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.core.http_timeout),
      help=argparse.SUPPRESS)
def Args(parser):
  """Registers the gcloud global flags: account, project, quiet and tracing."""
  parser.add_argument(
      '--account',
      metavar='ACCOUNT',
      category=base.COMMONLY_USED_FLAGS,
      help='Google Cloud Platform user account to use for invocation.',
      action=actions.StoreProperty(properties.VALUES.core.account))
  parser.add_argument(
      '--project',
      metavar='PROJECT_ID',
      dest='project',
      category=base.COMMONLY_USED_FLAGS,
      suggestion_aliases=['--application'],
      completer=resource_manager_completers.ProjectCompleter,
      action=actions.StoreProperty(properties.VALUES.core.project),
      help="""\
The Google Cloud Platform project name to use for this invocation. If
omitted, then the current project is assumed; the current project can
be listed using `gcloud config list --format='text(core.project)'`
and can be set using `gcloud config set project PROJECTID`.
""")
  # Must have a None default so properties are not always overridden when the
  # arg is not provided.
  parser.add_argument(
      '--quiet', '-q',
      default=None,
      category=base.COMMONLY_USED_FLAGS,
      action=actions.StoreConstProperty(
          properties.VALUES.core.disable_prompts, True),
      help="""\
Disable all interactive prompts when running gcloud commands. If input
is required, defaults will be used, or an error will be raised.
Overrides the default core/disable_prompts property value for this
command invocation. Must be used at the beginning of commands. This
is equivalent to setting the environment variable
`CLOUDSDK_CORE_DISABLE_PROMPTS` to 1.
""")
  # The trace flags are mutually exclusive routing choices.
  trace_group = parser.add_mutually_exclusive_group()
  trace_group.add_argument(
      '--trace-token',
      default=None,
      action=actions.StoreProperty(properties.VALUES.core.trace_token),
      help='Token used to route traces of service requests for investigation'
      ' of issues.')
  trace_group.add_argument(
      '--trace-email',
      metavar='USERNAME',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.core.trace_email),
      hidden=True,
      help='THIS ARGUMENT NEEDS HELP TEXT.')
  trace_group.add_argument(
      '--trace-log',
      default=None,
      action=actions.StoreBooleanProperty(
          properties.VALUES.core.trace_log),
      hidden=True,
      help='THIS ARGUMENT NEEDS HELP TEXT.')
  trace_group.add_argument(
      '--capture-session-file',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.core.capture_session_file),
      hidden=True,
      help='THIS ARGUMENT NEEDS HELP TEXT.')
def Args(parser):
  """Registers beta-track flags for cluster creation."""
  _CommonArgs(parser, beta=True)
  flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
  parser.add_argument(
      '--zone', '-z',
      help="""
      The compute zone (e.g. us-central1-a) for the cluster. If empty,
      and --region is set to a value other than 'global', the server will
      pick a zone in the region.
      """,
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  parser.add_argument(
      '--max-idle',
      type=arg_parsers.Duration(),
      help="""\
      The duration before cluster is auto-deleted after last job completes,
      such as "30m", "2h" or "1d".
      """)
  # --max-age and --expiration-time are two ways to express the same
  # auto-delete deadline, so they are mutually exclusive.
  auto_delete_group = parser.add_mutually_exclusive_group()
  auto_delete_group.add_argument(
      '--max-age',
      type=arg_parsers.Duration(),
      help="""\
      The lifespan of the cluster before it is auto-deleted, such as "30m",
      "2h" or "1d".
      """)
  auto_delete_group.add_argument(
      '--expiration-time',
      type=arg_parsers.Datetime.Parse,
      help="""\
      The time when cluster will be auto-deleted, such as
      "2017-08-29T18:52:51.142Z"
      """)
  # Generate --master-accelerator and --worker-accelerator with
  # role-specific help text.
  for instance_type in ('master', 'worker'):
    help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
    if instance_type == 'worker':
      help_msg += """
      Note: No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
    help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
    parser.add_argument(
        '--{0}-accelerator'.format(instance_type),
        type=arg_parsers.ArgDict(spec={
            'type': str,
            'count': int,
        }),
        metavar='type=TYPE,[count=COUNT]',
        help=help_msg)
def Args(parser):
  """Registers the required --zone flag for this command."""
  parser.add_argument(
      '--zone',
      required=True,
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help='Autoscaler Zone name')
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  # A build takes either local/GCS source or no source at all.
  source = parser.add_mutually_exclusive_group()
  source.add_argument(
      'source',
      nargs='?',
      default='.',  # By default, the current directory is used.
      help='The source directory on local disk or tarball in Google Cloud '
      'Storage or disk to use for this build. If source is a local '
      'directory this command skips files specified in the '
      '`.gcloudignore` file (see `$ gcloud topic gcloudignore` for more '
      'information). If a .gitignore file is present in the local '
      'source directory, gcloud will use a Git-compatible '
      '.gcloudignore file that respects your .gitignore-ed files. The '
      'global .gitignore is not respected.')
  source.add_argument(
      '--no-source',
      action='store_true',
      help='Specify that no source should be uploaded with this build.')
  parser.add_argument(
      '--gcs-source-staging-dir',
      help='Directory in Google Cloud Storage to stage a copy of the source '
      'used for the build. If the bucket does not exist, it will be '
      'created. If not set, ```gs://<project id>_cloudbuild/source``` '
      'is used.',
  )
  parser.add_argument(
      '--gcs-log-dir',
      help='Directory in Google Cloud Storage to hold build logs. If not '
      'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
      'will be created and used.',
  )
  parser.add_argument(
      '--timeout',
      help='Maximum time a build can last before it is failed as "TIMEOUT", '
      'written as a duration (eg "2h15m5s" is two hours, fifteen '
      'minutes, and five seconds). If no unit is specified, seconds is '
      'assumed (eg "10" is 10 seconds).',
      action=actions.StoreProperty(properties.VALUES.container.build_timeout),
  )
  Submit._machine_type_flag_map.choice_arg.AddToParser(parser)
  parser.add_argument(
      '--disk-size',
      type=arg_parsers.BinarySize(lower_bound='100GB', upper_bound='1TB'),
      help='Machine disk size (GB) to run the build.',
  )
  parser.add_argument(
      '--substitutions',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help="""\
Parameters to be substituted in the build specification.

For example (using some nonsensical substitution keys; all keys must
begin with an underscore):

    $ gcloud container builds submit . \\
        --config config.yaml \\
        --substitutions _FAVORITE_COLOR=blue,_NUM_CANDIES=10

This will result in a build where every occurrence of
```${_FAVORITE_COLOR}``` in certain fields is replaced by "blue", and
similarly for ```${_NUM_CANDIES}``` and "10".

Only the following built-in variables can be specified with the
`--substitutions` flag: REPO_NAME, BRANCH_NAME, TAG_NAME, REVISION_ID,
COMMIT_SHA, SHORT_SHA.

For more details, see:
https://cloud.google.com/container-builder/docs/api/build-requests#substitutions
""")
  # Exactly one of --tag or --config is required to define the build.
  build_config = parser.add_mutually_exclusive_group(required=True)
  build_config.add_argument(
      '--tag', '-t',
      help='The tag to use with a "docker build" image creation. The '
      'Container Builder service will run a remote "docker build -t '
      '$TAG .", where $TAG is the tag provided by this flag. The tag '
      'must be in the gcr.io/* or *.gcr.io/* namespaces.',
  )
  build_config.add_argument(
      '--config',
      help='The .yaml or .json file to use for build configuration.',
  )
  base.ASYNC_FLAG.AddToParser(parser)
  parser.display_info.AddFormat("""
      table(
          id,
          createTime.date('%Y-%m-%dT%H:%M:%S%Oz', undefined='-'),
          duration(start=startTime,end=finishTime,precision=0,calendar=false,undefined=" -").slice(2:).join(""):label=DURATION,
          build_source(undefined="-"):label=SOURCE,
          build_images(undefined="-"):label=IMAGES,
          status
      )
  """)
  # Do not try to create a URI to update the cache.
  parser.display_info.AddCacheUpdater(None)
def _Args(parser):
  """Add arguments for route creation."""
  parser.add_argument(
      '--description',
      help='An optional, textual description for the route.')
  parser.add_argument(
      '--network',
      default='default',
      help='Specifies the network to which the route will be applied.')
  parser.add_argument(
      '--tags',
      type=arg_parsers.ArgList(min_length=1),
      default=[],
      metavar='TAG',
      help="""\
      Identifies the set of instances that this route will apply to. If no
      tags are provided, the route will apply to all instances in the
      network.
      """)
  parser.add_argument(
      '--destination-range',
      required=True,
      help="""\
      The destination range of outgoing packets that the route will apply to.
      To match all traffic, use ``0.0.0.0/0''.
      """)
  parser.add_argument(
      '--priority',
      default=1000,
      type=int,
      help="""\
      Specifies the priority of this route relative to other routes with the
      same specificity. The lower the value, the higher the priority.
      """)
  # Exactly one next-hop choice is required; GA hops are registered by the
  # helper below.
  next_hop = parser.add_mutually_exclusive_group(required=True)
  _AddGaHops(next_hop)
  # NOTE(review): the *-zone/*-region companions are added to the parser
  # rather than the exclusive group, presumably so they can accompany a
  # next-hop choice — confirm against callers.
  parser.add_argument(
      '--next-hop-instance-zone',
      action=actions.StoreProperty(properties.VALUES.compute.zone),
      help=('The zone of the next hop instance. ' +
            instance_flags.ZONE_PROPERTY_EXPLANATION))
  parser.add_argument(
      '--next-hop-vpn-tunnel-region',
      help=('The region of the next hop vpn tunnel. ' +
            compute_flags.REGION_PROPERTY_EXPLANATION))
  next_hop.add_argument(
      '--next-hop-ilb',
      help="""\
      The target forwarding rule that will receive forwarded traffic. This
      can only be used when the destination_range is a public (non-RFC 1918)
      IP CIDR range. Requires --load-balancing-scheme=INTERNAL on the
      corresponding forwarding rule.
      """)
  parser.add_argument(
      '--next-hop-ilb-region',
      help=('The region of the next hop forwarding rule. ' +
            compute_flags.REGION_PROPERTY_EXPLANATION))
  parser.display_info.AddCacheUpdater(completers.RoutesCompleter)
def AddUpdateArgs(parser, include_beta):
  """Adds common flags for mutating forwarding rule targets.

  Args:
    parser: argparse parser to register flags on.
    include_beta: bool, if True also registers the beta-only
      --backend-service target.
  """
  # Exactly one target flavor must be chosen.
  target = parser.add_mutually_exclusive_group(required=True)
  target_instance = target.add_argument(
      '--target-instance',
      help='The target instance that will receive the traffic.')
  target_instance.detailed_help = textwrap.dedent("""\
      The name of the target instance that will receive the traffic. The
      target instance must be in a zone that's in the forwarding rule's
      region. Global forwarding rules may not direct traffic to target
      instances.
      """) + flags.ZONE_PROPERTY_EXPLANATION
  target_pool = target.add_argument(
      '--target-pool',
      help='The target pool that will receive the traffic.')
  target_pool.detailed_help = """\
      The target pool that will receive the traffic. The target pool
      must be in the same region as the forwarding rule. Global
      forwarding rules may not direct traffic to target pools.
      """
  target.add_argument(
      '--target-http-proxy',
      help='The target HTTP proxy that will receive the traffic.')
  target.add_argument(
      '--target-https-proxy',
      help='The target HTTPS proxy that will receive the traffic.')
  target.add_argument(
      '--target-ssl-proxy',
      help='The target SSL proxy that will receive the traffic.')
  # There are no beta target right now. Move alpha targets to here when they
  # turn to beta.
  if include_beta:
    target.add_argument(
        '--backend-service',
        help='The target backend service that will receive the traffic.')
  parser.add_argument(
      '--load-balancing-scheme',
      choices={
          'EXTERNAL': 'Used for HTTP or HTTPS for External Load Balancing.',
          'INTERNAL': 'Used for Internal Network Load Balancing.',
      },
      # Normalize user input to upper case before choice validation.
      type=lambda x: x.upper(),
      default='EXTERNAL',
      help='This signifies what the forwarding rule will be used for.')
  parser.add_argument(
      '--subnet',
      help='(Only for Internal Load Balancing) '
      'The subnetwork that this forwarding rule applies to. '
      'If the network configured for this forwarding rule is in '
      'auto subnet mode, the subnetwork is optional. However, if '
      'the network is in custom subnet mode, a subnetwork must be '
      'specified.')
  parser.add_argument(
      '--network',
      help='(Only for Internal Load Balancing) '
      'The network that this forwarding rule applies to. If this field '
      'is not specified, the default network will be used. In the '
      'absence of the default network, this field must be specified.')
  target.add_argument(
      '--target-vpn-gateway',
      help='The target VPN gateway that will receive forwarded traffic.')
  # Companion flag for --target-instance; not part of the exclusive group.
  parser.add_argument(
      '--target-instance-zone',
      help='The zone of the target instance.',
      action=actions.StoreProperty(
          properties.VALUES.compute.zone))
  parser.add_argument('name', help='The name of the forwarding rule.')
def _Args(parser):
  """Registers the hidden --api-version override flag."""
  parser.add_argument(
      '--api-version',
      choices=_ACTIVE_VERSIONS,
      action=actions.StoreProperty(
          properties.VALUES.api_endpoint_overrides.sql),
      help=argparse.SUPPRESS)
'name', metavar='NAME', help='The name of an environment.') MULTI_ENVIRONMENT_NAME_ARG = base.Argument( 'name', metavar='NAME', nargs='+', help='The name of an environment.') MULTI_OPERATION_NAME_ARG = base.Argument( 'name', metavar='NAME', nargs='+', help='The name or UUID of an operation.') OPERATION_NAME_ARG = base.Argument( 'name', metavar='NAME', help='The name or UUID of an operation.') LOCATION_FLAG = base.Argument( '--location', required=False, help='The Cloud Composer location (e.g., us-central1).', action=actions.StoreProperty(properties.VALUES.composer.location)) _ENV_VAR_NAME_ERROR = ( 'Only upper and lowercase letters, digits, and underscores are allowed. ' 'Environment variable names may not start with a digit.') _INVALID_IPV4_CIDR_BLOCK_ERROR = ('Invalid format of IPV4 CIDR block.') AIRFLOW_CONFIGS_FLAG_GROUP_DESCRIPTION = ( 'Group of arguments for modifying the Airflow configuration.') CLEAR_AIRFLOW_CONFIGS_FLAG = base.Argument( '--clear-airflow-configs', action='store_true', help="""\ Removes all Airflow config overrides from the environment.
def __AddBuiltinGlobalFlags(self, top_element):
  """Adds in calliope builtin global flags.

  This needs to happen immediately after the top group is loaded and before
  any other groups are loaded. The flags must be present so when sub groups
  are loaded, the flags propagate down.

  Args:
    top_element: backend._CommandCommon, The root of the command tree.
  """
  calliope_base.FLATTEN_FLAG.AddToParser(top_element.ai)
  calliope_base.FORMAT_FLAG.AddToParser(top_element.ai)

  # --version is only registered when a version callback was supplied.
  if self.__version_func is not None:
    top_element.ai.add_argument(
        '-v', '--version',
        do_not_propagate=True,
        category=calliope_base.COMMONLY_USED_FLAGS,
        action=actions.FunctionExitAction(self.__version_func),
        help=
        'Print version information and exit. This flag is only available'
        ' at the global level.')

  top_element.ai.add_argument(
      '--configuration',
      metavar='CONFIGURATION',
      category=calliope_base.COMMONLY_USED_FLAGS,
      help="""\
          The configuration to use for this command invocation. For more
          information on how to use configurations, run:
          `gcloud topic configurations`.  You can also use the [{0}] environment
          variable to set the equivalent of this flag for a terminal
          session.""".format(config.CLOUDSDK_ACTIVE_CONFIG_NAME))

  # Stored into the core/verbosity property (affects logging globally).
  top_element.ai.add_argument(
      '--verbosity',
      choices=log.OrderedVerbosityNames(),
      default=log.DEFAULT_VERBOSITY_STRING,
      category=calliope_base.COMMONLY_USED_FLAGS,
      help=
      'Override the default verbosity for this command with any of the '
      'supported standard verbosity levels: `debug`, `info`, `warning`, '
      '`error`, and `none`.',
      action=actions.StoreProperty(properties.VALUES.core.verbosity))

  # This should be a pure Boolean flag, but the alternate true/false explicit
  # value form is preserved for backwards compatibility; this flag is the
  # only Cloud SDK outlier.
  # TODO(b/24095744): Add true/false deprecation message.
  top_element.ai.add_argument(
      '--user-output-enabled',
      metavar=' ',  # Help text will look like the flag does not have a value.
      nargs='?',
      default=None,  # Tri-valued, None => don't override the property.
      const='true',
      choices=('true', 'false'),
      action=actions.StoreBooleanProperty(
          properties.VALUES.core.user_output_enabled),
      help='Print user intended output to the console.')

  top_element.ai.add_argument(
      '--log-http',
      default=None,  # Tri-valued, None => don't override the property.
      action=actions.StoreBooleanProperty(
          properties.VALUES.core.log_http),
      help='Log all HTTP server requests and responses to stderr.')

  # Hidden auth overrides (help suppressed); each writes through to the
  # corresponding auth property.
  top_element.ai.add_argument(
      '--authority-selector',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.auth.authority_selector),
      help=argparse.SUPPRESS)
  top_element.ai.add_argument(
      '--authorization-token-file',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.auth.authorization_token_file),
      help=argparse.SUPPRESS)
  top_element.ai.add_argument(
      '--credential-file-override',
      action=actions.StoreProperty(
          properties.VALUES.auth.credential_file_override),
      help=argparse.SUPPRESS)

  # Timeout value for HTTP requests.
  top_element.ai.add_argument(
      '--http-timeout',
      default=None,
      action=actions.StoreProperty(
          properties.VALUES.core.http_timeout),
      help=argparse.SUPPRESS)
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  parser.add_argument(
      '--repo-type',
      help="""
Type of repository.

`--repo-owner` must be provided if one of the following choices is selected:

`github` - A GitHub (Cloud Build GitHub App) repository connected to Cloud
Build triggers.

`bitbucket_mirrored` - A Bitbucket repository connected to Cloud Source
Repositories.

`github_mirrored` - A GitHub repository connected to Cloud Source
Repositories.

`--repo-owner` must not be provided if the following is selected:

`csr` - A repository on Cloud Source Repositories.

Connect repositories at
https://console.cloud.google.com/cloud-build/triggers/connect.
""",
      choices=['github', 'bitbucket_mirrored', 'github_mirrored', 'csr'],
      required=True)
  parser.add_argument(
      '--repo-name',
      help='Name of the repository.',
      required=True)
  parser.add_argument(
      '--repo-owner',
      help='Owner of the repository.')
  parser.add_argument(
      '--dockerfile',
      help="""
Path to the Dockerfile to build from, relative to the repository.

Defaults to './Dockerfile'.
""")
  # Exactly one trigger condition (branch, tag, or PR preview) is required.
  trigger_match = parser.add_mutually_exclusive_group(required=True)
  trigger_match.add_argument(
      '--branch-pattern',
      metavar='REGEX',
      help='''
A regular expression specifying which Git branches to match.

This pattern is used as a regex search for any incoming pushes. For example,
--branch-pattern=foo will match "foo", "foobar", and "barfoo". Events on a
branch that does not match will be ignored.

The syntax of the regular expressions accepted is the syntax accepted by
RE2 and described at https://github.com/google/re2/wiki/Syntax.
''')
  trigger_match.add_argument(
      '--tag-pattern',
      metavar='REGEX',
      help='''
A regular expression specifying which Git tags to match.

This pattern is used as a regex search for any incoming pushes. For example,
--tag-pattern=foo will match "foo", "foobar", and "barfoo". Events on a tag
that does not match will be ignored.

The syntax of the regular expressions accepted is the syntax accepted by
RE2 and described at https://github.com/google/re2/wiki/Syntax.
''')
  # Nested group inside the exclusive group: choosing PR preview enables the
  # three pr_preview flags below as a unit.
  pr_preview = trigger_match.add_argument_group(
      help='Pull request preview deployment settings')
  pr_preview.add_argument(
      '--pull-request-preview',
      help='''
Enables previewing your application for each pull request.

This configures your application to deploy to a target cluster when a pull
request is created or updated against a branch specified by the
`--pull-request-pattern` argument. The application will be deployed to the
namespace 'preview-[REPO_NAME]-[PR_NUMBER]'. This namespace will be deleted
after a number of days specified by the `--preview-expiry` argument.

The deployed preview application will still exist even after the pull request
is merged or closed. The preview application will eventually get cleaned up
by a Cloud Scheduler job after the namespace expires. You can also delete the
namespace manually.
''',
      action='store_true',
      required=True
  )
  pr_preview.add_argument(
      '--preview-expiry',
      type=int,
      default=3,  # days
      help='''
Number of days before a pull request preview deployment's namespace is
considered to be expired. An expired namespace will eventually be deleted.
Defaults to 3 days.
'''
  )
  pr_preview.add_argument(
      '--pull-request-pattern',
      metavar='REGEX',
      help="""
A regular expression specifying which base Git branch to match for
pull request events.

This pattern is used as a regex search for the base branch (the branch you are
trying to merge into) for pull request updates. For example,
--pull-request-pattern=foo will match "foo", "foobar", and "barfoo".

The syntax of the regular expressions accepted is the syntax accepted by
RE2 and described at https://github.com/google/re2/wiki/Syntax.
""",
      required=True
  )
  pr_preview.add_argument(
      '--comment-control',
      help="Require a repo collaborator to add '/gcbrun' as a comment in the "
      'pull request in order to run the build.',
      action='store_true'
  )
  parser.add_argument(
      '--gcs-config-staging-dir',
      help="""
Path to the Google Cloud Storage subdirectory into which to copy the configs
(suggested base and expanded Kubernetes YAML files) that are used to stage
and deploy your app. If the bucket in this path doesn't exist, Cloud Build
creates it.

If this field is not set, the configs are written to
'gs://[PROJECT_ID]_cloudbuild/deploy/config'.
""")
  parser.add_argument(
      '--app-name',
      help='If specified, the following label is added to the Kubernetes '
      "manifests: 'app.kubernetes.io/name: APP_NAME'. Defaults to the "
      'repository name provided by `--repo-name`.')
  parser.add_argument(
      '--cluster',
      help='Name of the target cluster to deploy to.',
      required=True)
  parser.add_argument(
      '--location',
      help='Region or zone of the target cluster to deploy to.',
      required=True)
  parser.add_argument(
      '--namespace',
      help='Namespace of the target cluster to deploy to. If this field is '
      "not set, the 'default' namespace is used.")
  parser.add_argument(
      '--config',
      help="""
Path to the Kubernetes YAML, or directory containing multiple Kubernetes YAML
files, used to deploy the container image. The path is relative to the
repository root. The files must reference the provided container image or tag.

If this field is not set, a default Deployment config and Horizontal Pod
Autoscaler config are used to deploy the image.
""")
  parser.add_argument(
      '--expose',
      type=int,
      help='Port that the deployed application listens on. If set, a '
      "Kubernetes Service of type 'LoadBalancer' is created with a "
      'single TCP port mapping that exposes this port.')
  # Stored into the builds/timeout property rather than the namespace.
  parser.add_argument(
      '--timeout',
      help='Maximum time a build is run before it times out. For example, '
      '"2h15m5s" is two hours, fifteen minutes, and five seconds. If you '
      'do not specify a unit, seconds is assumed. Overrides the default '
      'builds/timeout property value for this command invocation.',
      action=actions.StoreProperty(properties.VALUES.builds.timeout))
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  source = parser.add_mutually_exclusive_group()
  # Fixed user-facing typo: "use`.gcloudignore`" was missing a space.
  source.add_argument(
      'source',
      nargs='?',
      default='.',  # By default, the current directory is used.
      help='The location of the source to build. The location can be a '
      'directory on a local disk or a gzipped archive file (.tar.gz) in '
      'Google Cloud Storage. If the source is a local directory, this '
      'command skips the files specified in the `--ignore-file`. If '
      '`--ignore-file` is not specified, use `.gcloudignore` file. If a '
      '`.gitignore` file is present in the local source directory, gcloud '
      'will use a Git-compatible `.gcloudignore` file that respects your '
      '.gitignored files. The global `.gitignore` is not respected. For more '
      'information on `.gcloudignore`, see `gcloud topic gcloudignore`.',
  )
  source.add_argument(
      '--no-source',
      action='store_true',
      help='Specify that no source should be uploaded with this build.')
  parser.add_argument(
      '--gcs-source-staging-dir',
      help='A directory in Google Cloud Storage to copy the source used for '
      'staging the build. If the specified bucket does not exist, Cloud '
      'Build will create one. If you don\'t set this field, '
      '```gs://[PROJECT_ID]_cloudbuild/source``` is used.',
  )
  parser.add_argument(
      '--gcs-log-dir',
      help='A directory in Google Cloud Storage to hold build logs. If this '
      'field is not set, '
      '```gs://[PROJECT_NUMBER].cloudbuild-logs.googleusercontent.com/``` '
      'will be created and used.',
  )
  # Stored into the builds/timeout property so the value is visible beyond
  # this command's argparse namespace.
  parser.add_argument(
      '--timeout',
      help='Maximum time a build is run before it is failed as `TIMEOUT`. It '
      'is specified as a duration; for example, "2h15m5s" is two hours, '
      'fifteen minutes, and five seconds. If you don\'t specify a unit, '
      'seconds is assumed. For example, "10" is 10 seconds.',
      action=actions.StoreProperty(properties.VALUES.builds.timeout),
  )
  # Registers the --machine-type choice flag defined on the Submit command.
  Submit._machine_type_flag_map.choice_arg.AddToParser(parser)  # pylint: disable=protected-access
  parser.add_argument(
      '--disk-size',
      type=arg_parsers.BinarySize(lower_bound='100GB', upper_bound='1TB'),
      help='Machine disk size (GB) to run the build.',
  )
  parser.add_argument(
      '--substitutions',
      metavar='KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help="""\
Parameters to be substituted in the build specification.

For example (using some nonsensical substitution keys; all keys must begin with
an underscore):

  $ gcloud builds submit . --config config.yaml \\
      --substitutions _FAVORITE_COLOR=blue,_NUM_CANDIES=10

This will result in a build where every occurrence of ```${_FAVORITE_COLOR}```
in certain fields is replaced by "blue", and similarly for
```${_NUM_CANDIES}``` and "10".

Only the following built-in variables can be specified with the
`--substitutions` flag: REPO_NAME, BRANCH_NAME, TAG_NAME, REVISION_ID,
COMMIT_SHA, SHORT_SHA.

For more details, see:
https://cloud.google.com/cloud-build/docs/api/build-requests#substitutions
""")
  # --tag and --config are mutually exclusive ways to describe the build.
  build_config = parser.add_mutually_exclusive_group()
  build_config.add_argument(
      '--tag', '-t',
      help='The tag to use with a "docker build" image creation. '
      'Cloud Build will run a remote "docker build -t '
      '$TAG .", where $TAG is the tag provided by this flag. The tag '
      'must be in the gcr.io/* or *.gcr.io/* namespaces. Specify a tag '
      'if you want Cloud Build to build using a Dockerfile '
      'instead of a build config file. If you specify a tag in this '
      'command, your source must include a Dockerfile. For instructions '
      'on building using a Dockerfile see '
      'https://cloud.google.com/cloud-build/docs/quickstart-docker.',
  )
  build_config.add_argument(
      '--config',
      default='cloudbuild.yaml',  # By default, find this in the current dir
      help='The YAML or JSON file to use as the build configuration file.',
  )
  parser.add_argument(
      '--no-cache',
      action='store_true',
      help='If set, disable layer caching when building with Kaniko.\n'
      '\n'
      'This has the same effect as setting the builds/kaniko_cache_ttl '
      'property to 0 for this build. This can be useful in cases where '
      'Dockerfile builds are non-deterministic and a non-deterministic '
      'result should not be cached.')
  base.ASYNC_FLAG.AddToParser(parser)
  parser.display_info.AddFormat("""
    table(
      id,
      createTime.date('%Y-%m-%dT%H:%M:%S%Oz', undefined='-'),
      duration(start=startTime,end=finishTime,precision=0,calendar=false,undefined=" -").slice(2:).join(""):label=DURATION,
      build_source(undefined="-"):label=SOURCE,
      build_images(undefined="-"):label=IMAGES,
      status
    )
  """)
  # Do not try to create a URI to update the cache.
  parser.display_info.AddCacheUpdater(None)
  parser.add_argument(
      '--ignore-file',
      help='Override the `.gcloudignore` file and use the specified file '
      'instead.')
RUNTIME_VERSION = base.Argument( '--runtime-version', help=( 'The Google Cloud ML Engine runtime version for this job. Defaults ' 'to a stable version, which is defined in the documentation along ' 'with the list of supported versions: ' 'https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list' # pylint: disable=line-too-long )) POLLING_INTERVAL = base.Argument( '--polling-interval', type=arg_parsers.BoundedInt(1, sys.maxsize, unlimited=True), required=False, default=60, action=actions.StoreProperty(properties.VALUES.ml_engine.polling_interval), help='Number of seconds to wait between efforts to fetch the latest ' 'log messages.') ALLOW_MULTILINE_LOGS = base.Argument( '--allow-multiline-logs', action='store_true', help='Output multiline log messages as single records.') TASK_NAME = base.Argument( '--task-name', required=False, default=None, help='If set, display only the logs for this particular task.') _FRAMEWORK_CHOICES = { 'TENSORFLOW': 'tensorflow', 'SCIKIT_LEARN': 'scikit-learn',
def _Args(parser):
  """Add arguments for route creation.

  Fixes the user-facing typo "specifity" -> "specificity" in the --priority
  help strings; all other flags are unchanged.

  Args:
    parser: The argparse parser to register route-creation flags on.
  """
  parser.add_argument(
      '--description',
      help='An optional, textual description for the route.')

  parser.add_argument(
      '--network',
      default='default',
      help='Specifies the network to which the route will be applied.')

  # The `arg.detailed_help = ...` pattern is the legacy calliope idiom for
  # attaching long-form help to an argument.
  tags = parser.add_argument(
      '--tags',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      default=[],
      metavar='TAG',
      help='Identifies the set of instances that this route will apply to.')
  tags.detailed_help = """\
      Identifies the set of instances that this route will apply to. If no
      tags are provided, the route will apply to all instances in the
      network.
      """

  destination_range = parser.add_argument(
      '--destination-range',
      required=True,
      help=('The destination range of outgoing packets that the route will '
            'apply to.'))
  destination_range.detailed_help = """\
      The destination range of outgoing packets that the route will
      apply to. To match all traffic, use ``0.0.0.0/0''.
      """

  priority = parser.add_argument(
      '--priority',
      default=1000,
      help=('Specifies the priority of this route relative to other routes '
            'with the same specificity.'),
      type=int)
  priority.detailed_help = """\
      Specifies the priority of this route relative to other routes
      with the same specificity. The lower the value, the higher the
      priority.
      """

  # Exactly one next-hop flag is required; GA hops are registered by the
  # shared helper.
  next_hop = parser.add_mutually_exclusive_group(required=True)
  _AddGaHops(next_hop)

  # Stored into the compute/zone property rather than the namespace.
  next_hop_instance_zone = parser.add_argument(
      '--next-hop-instance-zone',
      help='The zone of the next hop instance.',
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  next_hop_instance_zone.detailed_help = ("""\
      The zone of the next hop instance.
      """ + constants.ZONE_PROPERTY_EXPLANATION)

  next_hop_vpn_tunnel_region = parser.add_argument(
      '--next-hop-vpn-tunnel-region',
      help='The region of the next hop vpn tunnel.')
  next_hop_vpn_tunnel_region.detailed_help = ("""\
      The region of the next hop vpn tunnel.
      """ + constants.REGION_PROPERTY_EXPLANATION)

  parser.add_argument(
      'name',
      help='The name to assign to the route.')
def _Args(parser):
  """Set up argument parsing."""
  # --endpoint is hidden (help suppressed); its value is written straight to
  # the api_endpoint_overrides/compute property instead of the namespace.
  parser.add_argument(
      '--endpoint',
      action=actions.StoreProperty(
          properties.VALUES.api_endpoint_overrides.compute),
      help=argparse.SUPPRESS)
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  source = parser.add_mutually_exclusive_group()
  source.add_argument(
      'source',
      nargs='?',
      default='.',  # By default, the current directory is used.
      help='Location of the source and configs to build and deploy. '
      'The location can be a directory on a local disk or a '
      'gzipped archive file (.tar.gz) in Google Cloud Storage.')
  source.add_argument(
      '--no-source',
      action='store_true',
      help='Specify that no source should be uploaded with this build.')
  # NOTE(review): the quote around 'gcr.io/[PROJECT_ID]/[IMAGE]/[TAG] below is
  # unbalanced, and [IMAGE]/[TAG] looks like it may have been intended as
  # [IMAGE]:[TAG] — confirm against the published command help.
  docker = parser.add_mutually_exclusive_group(help="""
Image to use to build and/or deploy.

To build an image with a default tag, omit these flags. The resulting tag will
be in the format 'gcr.io/[PROJECT_ID]/[IMAGE]/[TAG], where [PROJECT_ID] is
your project ID, [IMAGE] is the value provided by `--app-name`, if provided,
else it is the name of the provided source directory, and [TAG] is the value
provided by `--app-version`, if provided, else it is the commit SHA of your
provided source.
""")
  docker.add_argument('--tag', help="""
Tag to use with a 'docker build' image creation. Cloud Build runs a remote
'docker build -t $TAG .' command, where $TAG is the tag provided by this
flag. The tag must be in the gcr.io/* or *.gcr.io/* namespaces. If you
specify a tag in this command, your source must include a Dockerfile. For
instructions on building using a Dockerfile see
https://cloud.google.com/cloud-build/docs/quickstart-docker.
""")
  docker.add_argument(
      '--image',
      help=
      'Existing container image to deploy. If set, Cloud Build deploys '
      'the container image to the target Kubernetes cluster. The image must '
      'be in the gcr.io/* or *.gcr.io/* namespaces.')
  parser.add_argument('--gcs-staging-dir', help="""
Path to the Google Cloud Storage subdirectory into which to copy the source
and configs (suggested base and expanded Kubernetes YAML files) that are used
to stage and deploy your app. If the bucket in this path doesn't exist, Cloud
Build creates it.

If this field is not set, the source and configs are written to
```gs://[PROJECT_ID]_cloudbuild/deploy```, where source is written to the
'source' sub-directory and configs are written to the 'config' sub-directory.
""")
  parser.add_argument(
      '--app-name',
      help='If specified, the following label is added to the Kubernetes '
      "manifests: 'app.kubernetes.io/name: APP_NAME'. Defaults to the "
      'container image name provided by `--image` or `--tag` without the tag, '
      "e.g. 'my-app' for 'gcr.io/my-project/my-app:1.0.0'.")
  parser.add_argument(
      '--app-version',
      help='If specified, the following label is added to the Kubernetes '
      "manifests: 'app.kubernetes.io/version: APP_VERSION'. Defaults to the "
      'container image tag provided by `--image` or `--tag`. If no image tag '
      'is provided and `SOURCE` is a valid git repository, defaults to the '
      'short revision hash of the HEAD commit.')
  parser.add_argument('--cluster',
                      help='Name of the target cluster to deploy to.',
                      required=True)
  parser.add_argument(
      '--location',
      help='Region or zone of the target cluster to deploy to.',
      required=True)
  parser.add_argument(
      '--namespace',
      help=
      'Namespace of the target cluster to deploy to. If this field is '
      "not set, the 'default' namespace is used.")
  parser.add_argument('--config', help="""
Path to the Kubernetes YAML, or directory containing multiple Kubernetes YAML
files, used to deploy the container image. The path is relative to the
repository root provided by [SOURCE]. The files must reference the provided
container image or tag.

If this field is not set, a default Deployment config and Horizontal Pod
Autoscaler config are used to deploy the image.
""")
  # Stored into the builds/timeout property rather than the namespace.
  parser.add_argument(
      '--timeout',
      help='Maximum time a build is run before it times out. For example, '
      '"2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If you '
      'do not specify a unit, seconds is assumed. Overrides the default '
      'builds/timeout property value for this command invocation.',
      action=actions.StoreProperty(properties.VALUES.builds.timeout),
  )
  parser.add_argument(
      '--expose',
      type=int,
      help='Port that the deployed application listens on. If set, a '
      "Kubernetes Service of type 'LoadBalancer' is created with a "
      'single TCP port mapping that exposes this port.')
  base.ASYNC_FLAG.AddToParser(parser)
def Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
      to capture some information, but behaves like an ArgumentParser.
  """
  parser.add_argument(
      'source',
      help='The source directory on local disk or tarball in Google Cloud '
      'Storage or disk to use for this build.',
  )
  parser.add_argument(
      '--gcs-source-staging-dir',
      help='Directory in Google Cloud Storage to stage a copy of the source '
      'used for the build. If the bucket does not exist, it will be '
      'created. If not set, ```gs://<project id>_cloudbuild/source``` '
      'is used.',
  )
  parser.add_argument(
      '--gcs-log-dir',
      help='Directory in Google Cloud Storage to hold build logs. If the '
      'bucket does not exist, it will be created. If not set, '
      '```gs://<project id>_cloudbuild/logs``` is used.',
  )
  # Stored into the container/build_timeout property rather than the
  # argparse namespace.
  parser.add_argument(
      '--timeout',
      help='Maximum time a build can last before it is failed as "TIMEOUT", '
      'written as a duration (eg "2h15m5s" is two hours, fifteen '
      'minutes, and five seconds). If no unit is specified, seconds is '
      'assumed (eg "10" is 10 seconds).',
      action=actions.StoreProperty(properties.VALUES.container.build_timeout),
  )
  parser.add_argument(
      '--substitutions',
      metavar='```_```KEY=VALUE',
      type=arg_parsers.ArgDict(),
      help="""\
Parameters to be substituted in the build specification.

For example (using some nonsensical substitution keys; all keys must begin
with an underscore):

  $ gcloud container builds submit . \\
      --config config.yaml \\
      --substitutions _FAVORITE_COLOR=blue,_NUM_CANDIES=10

This will result in a build where every occurrence of ```${_FAVORITE_COLOR}```
in certain fields is replaced by "blue", and similarly for
```${_NUM_CANDIES}``` and "10".

For more details, see:
https://cloud.google.com/container-builder/docs/api/build-requests#substitutions
""")
  # Exactly one of --tag / --config must be supplied.
  build_config = parser.add_mutually_exclusive_group(required=True)
  build_config.add_argument(
      '--tag', '-t',
      help='The tag to use with a "docker build" image creation. The '
      'Container Builder service will run a remote "docker build -t '
      '$TAG .", where $TAG is the tag provided by this flag. The tag '
      'must be in the gcr.io/* or *.gcr.io/* namespaces.',
  )
  build_config.add_argument(
      '--config',
      help='The .yaml or .json file to use for build configuration.',
  )
  base.ASYNC_FLAG.AddToParser(parser)
def Args(parser):
  """Registers global flags; each writes through to a core/auth property."""
  parser.add_argument(
      '--account', metavar='ACCOUNT',
      category=base.COMMONLY_USED_FLAGS,
      help='Google Cloud Platform user account to use for invocation.',
      action=actions.StoreProperty(properties.VALUES.core.account))

  # Ideally this would be on the alpha group (since it's alpha) but there are
  # a bunch of problems with doing that. Global flags are treated differently
  # than other flags and flags on the Alpha group are not treated as global.
  # The result is that the flag shows up on every man page as if it was part
  # of the individual command (which is undesirable and breaks every surface
  # spec).
  parser.add_argument(
      '--impersonate-service-account', metavar='SERVICE_ACCOUNT_EMAIL',
      hidden=True,
      help='(ALPHA) For this gcloud invocation, all API requests will be '
           'made as the given service account instead of the currently '
           'selected account. This is done without needing to create, '
           'download, and activate a key for the account. In order to '
           'perform operations as the service account, your currently '
           'selected account must have an IAM role that includes the '
           'iam.serviceAccounts.getAccessToken permission for the service '
           'account. The roles/iam.serviceAccountTokenCreator role has '
           'this permission or you may create a custom role.',
      action=actions.StoreProperty(
          properties.VALUES.auth.impersonate_service_account))

  parser.add_argument(
      '--project', metavar='PROJECT_ID',
      dest='project',
      category=base.COMMONLY_USED_FLAGS,
      suggestion_aliases=['--application'],
      completer=resource_manager_completers.ProjectCompleter,
      action=actions.StoreProperty(properties.VALUES.core.project),
      help="""\
          The Google Cloud Platform project name to use for this invocation. If
          omitted, then the current project is assumed; the current project can
          be listed using `gcloud config list --format='text(core.project)'`
          and can be set using `gcloud config set project PROJECTID`.
          """)

  # Must have a None default so properties are not always overridden when the
  # arg is not provided.
  parser.add_argument(
      '--quiet', '-q',
      default=None,
      category=base.COMMONLY_USED_FLAGS,
      action=actions.StoreConstProperty(
          properties.VALUES.core.disable_prompts, True),
      help="""\
          Disable all interactive prompts when running gcloud commands. If input
          is required, defaults will be used, or an error will be raised.
          Overrides the default core/disable_prompts property value for this
          command invocation. Must be used at the beginning of commands. This
          is equivalent to setting the environment variable
          `CLOUDSDK_CORE_DISABLE_PROMPTS` to 1.
          """)

  # The three trace flags are mutually exclusive routing options.
  trace_group = parser.add_mutually_exclusive_group()
  trace_group.add_argument(
      '--trace-token',
      default=None,
      action=actions.StoreProperty(properties.VALUES.core.trace_token),
      help=
      'Token used to route traces of service requests for investigation'
      ' of issues.')
  trace_group.add_argument('--trace-email',
                           metavar='USERNAME',
                           default=None,
                           action=actions.StoreProperty(
                               properties.VALUES.core.trace_email),
                           hidden=True,
                           help='THIS ARGUMENT NEEDS HELP TEXT.')
  trace_group.add_argument('--trace-log',
                           default=None,
                           action=actions.StoreBooleanProperty(
                               properties.VALUES.core.trace_log),
                           hidden=True,
                           help='THIS ARGUMENT NEEDS HELP TEXT.')
def _CommonArgs(parser):
  """Register flags common to all tracks.

  Registers the dataproc cluster-creation flags (cluster shape, machine
  types, network, zone, scopes, boot disks, etc.) on the given parser.

  Args:
    parser: The argparse parser to add the flags to.
  """
  instances_flags.AddTagsArgs(parser)
  base.ASYNC_FLAG.AddToParser(parser)
  labels_util.AddCreateLabelsFlags(parser)
  parser.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      default=None,
      help=('Metadata to be made available to the guest operating system '
            'running on the instances'),
      metavar='KEY=VALUE')
  parser.add_argument('name', help='The name of this cluster.')
  parser.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
           'server-specified.')
  parser.add_argument(
      '--num-preemptible-workers',
      type=int,
      help='The number of preemptible worker nodes in the cluster.')
  parser.add_argument(
      '--main-machine-type',
      help='The type of machine to use for the main. Defaults to '
           'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
           'server-specified.')
  parser.add_argument('--image', hidden=True)
  parser.add_argument(
      '--image-version',
      metavar='VERSION',
      help='The image version to use for the cluster. Defaults to the '
           'latest version.')
  parser.add_argument(
      '--bucket',
      help='The Google Cloud Storage bucket to use with the Google Cloud '
           'Storage connector. A bucket is auto created when this parameter is '
           'not specified.')

  # --network and --subnet are mutually exclusive ways to pick the VPC.
  netparser = parser.add_mutually_exclusive_group()
  netparser.add_argument(
      '--network',
      help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
  netparser.add_argument(
      '--subnet',
      help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
  parser.add_argument(
      '--zone',
      '-z',
      help='The compute zone (e.g. us-central1-a) for the cluster.',
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-main-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the main in a cluster.')
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(min_length=1),
      metavar='CLOUD_STORAGE_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help='The maximum duration of each initialization action.')
  parser.add_argument(
      '--properties',
      type=arg_parsers.ArgDict(),
      metavar='PREFIX:PROPERTY=VALUE',
      default={},
      help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========
""")
  parser.add_argument(
      '--service-account',
      help='The Google Cloud IAM service account to be authenticated as.')
  parser.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes necessary for the cluster to function properly are always
added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias. Available aliases
are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP))

  # The *-size-gb flags are deprecated in favor of the *-size flags below;
  # each pair is mutually exclusive so a user can only supply one form.
  main_boot_disk = parser.add_mutually_exclusive_group()
  worker_boot_disk = parser.add_mutually_exclusive_group()

  # Deprecated, to be removed at a future date.
  main_boot_disk.add_argument(
      '--main-boot-disk-size-gb', type=int, hidden=True)
  worker_boot_disk.add_argument(
      '--worker-boot-disk-size-gb', type=int, hidden=True)

  # Shared by all three boot-disk-size flags (previously the preemptible
  # worker flag duplicated this text inline).
  boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a whole
      number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
  main_boot_disk.add_argument(
      '--main-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)
  worker_boot_disk.add_argument(
      '--worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)
  parser.add_argument(
      '--preemptible-worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)